vk: Set cb_state to NULL at cmd buffer create time
[mesa.git] / src / vulkan / device.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
#include <stdlib.h>
29
30 #include "private.h"
31
32 static int
33 anv_env_get_int(const char *name)
34 {
35 const char *val = getenv(name);
36
37 if (!val)
38 return 0;
39
40 return strtol(val, NULL, 0);
41 }
42
43 static VkResult
44 fill_physical_device(struct anv_physical_device *device,
45 struct anv_instance *instance,
46 const char *path)
47 {
48 int fd;
49
50    fd = open(path, O_RDWR | O_CLOEXEC);
51 if (fd < 0)
52 return vk_error(VK_ERROR_UNAVAILABLE);
53
54 device->instance = instance;
55 device->path = path;
56
57 device->chipset_id = anv_env_get_int("INTEL_DEVID_OVERRIDE");
58 device->no_hw = false;
59 if (device->chipset_id) {
60 /* INTEL_DEVID_OVERRIDE implies INTEL_NO_HW. */
61 device->no_hw = true;
62 } else {
63 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
64 }
65 if (!device->chipset_id)
66 goto fail;
67
68 device->name = brw_get_device_name(device->chipset_id);
69 device->info = brw_get_device_info(device->chipset_id, -1);
70 if (!device->info)
71 goto fail;
72
73 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
74 goto fail;
75
76 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
77 goto fail;
78
79 if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
80 goto fail;
81
82 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
83 goto fail;
84
85 close(fd);
86
87 return VK_SUCCESS;
88
89 fail:
90 close(fd);
91
92 return vk_error(VK_ERROR_UNAVAILABLE);
93 }
94
95 static void *default_alloc(
96 void* pUserData,
97 size_t size,
98 size_t alignment,
99 VkSystemAllocType allocType)
100 {
101 return malloc(size);
102 }
103
104 static void default_free(
105 void* pUserData,
106 void* pMem)
107 {
108 free(pMem);
109 }
110
111 static const VkAllocCallbacks default_alloc_callbacks = {
112 .pUserData = NULL,
113 .pfnAlloc = default_alloc,
114 .pfnFree = default_free
115 };
116
117 VkResult anv_CreateInstance(
118 const VkInstanceCreateInfo* pCreateInfo,
119 VkInstance* pInstance)
120 {
121 struct anv_instance *instance;
122 const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
123 void *user_data = NULL;
124 VkResult result;
125
126 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
127
128 if (pCreateInfo->pAllocCb) {
129 alloc_callbacks = pCreateInfo->pAllocCb;
130 user_data = pCreateInfo->pAllocCb->pUserData;
131 }
132 instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
133 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
134 if (!instance)
135 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
136
137 instance->pAllocUserData = alloc_callbacks->pUserData;
138 instance->pfnAlloc = alloc_callbacks->pfnAlloc;
139 instance->pfnFree = alloc_callbacks->pfnFree;
140 instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
141
142 instance->physicalDeviceCount = 0;
143 result = fill_physical_device(&instance->physicalDevice,
144 instance, "/dev/dri/renderD128");
145
146 if (result != VK_SUCCESS)
147 return result;
148
149 instance->physicalDeviceCount++;
150 *pInstance = (VkInstance) instance;
151
152 return VK_SUCCESS;
153 }
154
155 VkResult anv_DestroyInstance(
156 VkInstance _instance)
157 {
158 struct anv_instance *instance = (struct anv_instance *) _instance;
159
160 instance->pfnFree(instance->pAllocUserData, instance);
161
162 return VK_SUCCESS;
163 }
164
165 VkResult anv_EnumeratePhysicalDevices(
166 VkInstance _instance,
167 uint32_t* pPhysicalDeviceCount,
168 VkPhysicalDevice* pPhysicalDevices)
169 {
170 struct anv_instance *instance = (struct anv_instance *) _instance;
171
172 if (*pPhysicalDeviceCount >= 1)
173 pPhysicalDevices[0] = (VkPhysicalDevice) &instance->physicalDevice;
174 *pPhysicalDeviceCount = instance->physicalDeviceCount;
175
176 return VK_SUCCESS;
177 }
178
179 VkResult anv_GetPhysicalDeviceInfo(
180 VkPhysicalDevice physicalDevice,
181 VkPhysicalDeviceInfoType infoType,
182 size_t* pDataSize,
183 void* pData)
184 {
185 struct anv_physical_device *device = (struct anv_physical_device *) physicalDevice;
186 VkPhysicalDeviceProperties *properties;
187 VkPhysicalDevicePerformance *performance;
188 VkPhysicalDeviceQueueProperties *queue_properties;
189 VkPhysicalDeviceMemoryProperties *memory_properties;
190 VkDisplayPropertiesWSI *display_properties;
191 uint64_t ns_per_tick = 80;
192
193 switch ((uint32_t) infoType) {
194 case VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES:
195 properties = pData;
196
197 *pDataSize = sizeof(*properties);
198 if (pData == NULL)
199 return VK_SUCCESS;
200
201 properties->apiVersion = 1;
202 properties->driverVersion = 1;
203 properties->vendorId = 0x8086;
204 properties->deviceId = device->chipset_id;
205 properties->deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
206 strcpy(properties->deviceName, device->name);
207 properties->maxInlineMemoryUpdateSize = 0;
208 properties->maxBoundDescriptorSets = MAX_SETS;
209 properties->maxThreadGroupSize = 512;
210 properties->timestampFrequency = 1000 * 1000 * 1000 / ns_per_tick;
211 properties->multiColorAttachmentClears = true;
212 properties->maxDescriptorSets = 8;
213 properties->maxViewports = 16;
214 properties->maxColorAttachments = 8;
215 return VK_SUCCESS;
216
217 case VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE:
218 performance = pData;
219
220 *pDataSize = sizeof(*performance);
221 if (pData == NULL)
222 return VK_SUCCESS;
223
224 performance->maxDeviceClock = 1.0;
225 performance->aluPerClock = 1.0;
226 performance->texPerClock = 1.0;
227 performance->primsPerClock = 1.0;
228 performance->pixelsPerClock = 1.0;
229 return VK_SUCCESS;
230
231 case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES:
232 queue_properties = pData;
233
234 *pDataSize = sizeof(*queue_properties);
235 if (pData == NULL)
236 return VK_SUCCESS;
237
238 queue_properties->queueFlags = 0;
239 queue_properties->queueCount = 1;
240 queue_properties->maxAtomicCounters = 0;
241 queue_properties->supportsTimestamps = true;
242 queue_properties->maxMemReferences = 256;
243 return VK_SUCCESS;
244
245 case VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES:
246 memory_properties = pData;
247
248 *pDataSize = sizeof(*memory_properties);
249 if (pData == NULL)
250 return VK_SUCCESS;
251
252 memory_properties->supportsMigration = false;
253 memory_properties->supportsPinning = false;
254 return VK_SUCCESS;
255
256 case VK_PHYSICAL_DEVICE_INFO_TYPE_DISPLAY_PROPERTIES_WSI:
257 anv_finishme("VK_PHYSICAL_DEVICE_INFO_TYPE_DISPLAY_PROPERTIES_WSI");
258
259 *pDataSize = sizeof(*display_properties);
260 if (pData == NULL)
261 return VK_SUCCESS;
262
263 display_properties = pData;
264 display_properties->display = 0;
265 display_properties->physicalResolution = (VkExtent2D) { 0, 0 };
266 return VK_SUCCESS;
267
268 case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PRESENT_PROPERTIES_WSI:
269 anv_finishme("VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PRESENT_PROPERTIES_WSI");
270 return VK_SUCCESS;
271
272
273 default:
274 return VK_UNSUPPORTED;
275 }
276
277 }
278
279 void * vkGetProcAddr(
280 VkPhysicalDevice physicalDevice,
281 const char* pName)
282 {
283 return anv_lookup_entrypoint(pName);
284 }
285
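/* Parse the comma-separated INTEL_DEBUG environment variable.  Only "aub"
 * (dump an AUB trace of each submitted command buffer) and "no_hw" (skip the
 * actual execbuf submission) are recognized here.
 */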
286 static void
287 parse_debug_flags(struct anv_device *device)
288 {
289 const char *debug, *p, *end;
290
291 debug = getenv("INTEL_DEBUG");
292 device->dump_aub = false;
293 if (debug) {
294 for (p = debug; *p; p = end + 1) {
295 end = strchrnul(p, ',');
296 if (end - p == 3 && memcmp(p, "aub", 3) == 0)
297 device->dump_aub = true;
298 if (end - p == 5 && memcmp(p, "no_hw", 5) == 0)
299 device->no_hw = true;
300 if (*end == '\0')
301 break;
302 }
303 }
304 }
305
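/* Upload the default border color tables into dynamic state.  The tables are
 * indexed by VkBorderColor, and samplers reference these entries through
 * their indirect state pointer (see anv_CreateSampler).
 */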
306 static void
307 anv_device_init_border_colors(struct anv_device *device)
308 {
309 float float_border_colors[][4] = {
310 [VK_BORDER_COLOR_OPAQUE_WHITE] = { 1.0, 1.0, 1.0, 1.0 },
311 [VK_BORDER_COLOR_TRANSPARENT_BLACK] = { 0.0, 0.0, 0.0, 0.0 },
312 [VK_BORDER_COLOR_OPAQUE_BLACK] = { 0.0, 0.0, 0.0, 1.0 }
313 };
314
315 uint32_t uint32_border_colors[][4] = {
316 [VK_BORDER_COLOR_OPAQUE_WHITE] = { 1, 1, 1, 1 },
317 [VK_BORDER_COLOR_TRANSPARENT_BLACK] = { 0, 0, 0, 0 },
318 [VK_BORDER_COLOR_OPAQUE_BLACK] = { 0, 0, 0, 1 }
319 };
320
321 device->float_border_colors =
322 anv_state_pool_alloc(&device->dynamic_state_pool,
323 sizeof(float_border_colors), 32);
324 memcpy(device->float_border_colors.map,
325 float_border_colors, sizeof(float_border_colors));
326
327 device->uint32_border_colors =
328 anv_state_pool_alloc(&device->dynamic_state_pool,
329 sizeof(uint32_border_colors), 32);
330 memcpy(device->uint32_border_colors.map,
331 uint32_border_colors, sizeof(uint32_border_colors));
332
333 }
334
335 static const uint32_t BATCH_SIZE = 8192;
336
337 VkResult anv_CreateDevice(
338 VkPhysicalDevice _physicalDevice,
339 const VkDeviceCreateInfo* pCreateInfo,
340 VkDevice* pDevice)
341 {
342 struct anv_physical_device *physicalDevice =
343 (struct anv_physical_device *) _physicalDevice;
344 struct anv_instance *instance = physicalDevice->instance;
345 struct anv_device *device;
346
347 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
348
349 device = instance->pfnAlloc(instance->pAllocUserData,
350 sizeof(*device), 8,
351 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
352 if (!device)
353 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
354
355 device->no_hw = physicalDevice->no_hw;
356 parse_debug_flags(device);
357
358 device->instance = physicalDevice->instance;
359 device->fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
360 if (device->fd == -1)
361 goto fail_device;
362
363 device->context_id = anv_gem_create_context(device);
364 if (device->context_id == -1)
365 goto fail_fd;
366
367 anv_bo_pool_init(&device->batch_bo_pool, device, BATCH_SIZE);
368
369 anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
370
371 anv_state_pool_init(&device->dynamic_state_pool,
372 &device->dynamic_state_block_pool);
373
374 anv_block_pool_init(&device->instruction_block_pool, device, 2048);
375 anv_block_pool_init(&device->surface_state_block_pool, device, 2048);
376
377
378 /* Binding table pointers are only 16 bits so we have to make sure that
379 * they get allocated at the beginning of the surface state BO. To
380 * handle this, we create a separate block pool that works out of the
381 * first 64 KB of the surface state BO.
382 */
383 anv_block_pool_init_slave(&device->binding_table_block_pool,
384 &device->surface_state_block_pool, 32);
385
386 anv_state_pool_init(&device->surface_state_pool,
387 &device->surface_state_block_pool);
388
389 device->compiler = anv_compiler_create(device->fd);
390 device->aub_writer = NULL;
391
392 device->info = *physicalDevice->info;
393
394 pthread_mutex_init(&device->mutex, NULL);
395
396 anv_device_init_meta(device);
397
398 anv_device_init_border_colors(device);
399
400 *pDevice = (VkDevice) device;
401
402 return VK_SUCCESS;
403
404 fail_fd:
405 close(device->fd);
406 fail_device:
407 anv_device_free(device, device);
408
409 return vk_error(VK_ERROR_UNAVAILABLE);
410 }
411
412 VkResult anv_DestroyDevice(
413 VkDevice _device)
414 {
415 struct anv_device *device = (struct anv_device *) _device;
416
417 anv_compiler_destroy(device->compiler);
418
419
420 anv_bo_pool_finish(&device->batch_bo_pool);
421 anv_block_pool_finish(&device->dynamic_state_block_pool);
422 anv_block_pool_finish(&device->instruction_block_pool);
423 anv_block_pool_finish(&device->surface_state_block_pool);
424
425 close(device->fd);
426
427 if (device->aub_writer)
428 anv_aub_writer_destroy(device->aub_writer);
429
430 anv_device_free(device, device);
431
432 return VK_SUCCESS;
433 }
434
435 VkResult anv_GetGlobalExtensionInfo(
436 VkExtensionInfoType infoType,
437 uint32_t extensionIndex,
438 size_t* pDataSize,
439 void* pData)
440 {
441 static const VkExtensionProperties extensions[] = {
442 {
443 .extName = "VK_WSI_LunarG",
444 .version = 3
445 }
446 };
447 uint32_t count = ARRAY_SIZE(extensions);
448
449 switch (infoType) {
450 case VK_EXTENSION_INFO_TYPE_COUNT:
451 memcpy(pData, &count, sizeof(count));
452 *pDataSize = sizeof(count);
453 return VK_SUCCESS;
454
455 case VK_EXTENSION_INFO_TYPE_PROPERTIES:
456 if (extensionIndex >= count)
457 return vk_error(VK_ERROR_INVALID_EXTENSION);
458
459 memcpy(pData, &extensions[extensionIndex], sizeof(extensions[0]));
460 *pDataSize = sizeof(extensions[0]);
461 return VK_SUCCESS;
462
463 default:
464 return VK_UNSUPPORTED;
465 }
466 }
467
468 VkResult anv_GetPhysicalDeviceExtensionInfo(
469 VkPhysicalDevice physicalDevice,
470 VkExtensionInfoType infoType,
471 uint32_t extensionIndex,
472 size_t* pDataSize,
473 void* pData)
474 {
475 uint32_t *count;
476
477 switch (infoType) {
478 case VK_EXTENSION_INFO_TYPE_COUNT:
479 *pDataSize = 4;
480 if (pData == NULL)
481 return VK_SUCCESS;
482
483 count = pData;
484 *count = 0;
485 return VK_SUCCESS;
486
487 case VK_EXTENSION_INFO_TYPE_PROPERTIES:
488 return vk_error(VK_ERROR_INVALID_EXTENSION);
489
490 default:
491 return VK_UNSUPPORTED;
492 }
493 }
494
495 VkResult anv_EnumerateLayers(
496 VkPhysicalDevice physicalDevice,
497 size_t maxStringSize,
498 size_t* pLayerCount,
499 char* const* pOutLayers,
500 void* pReserved)
501 {
502 *pLayerCount = 0;
503
504 return VK_SUCCESS;
505 }
506
507 VkResult anv_GetDeviceQueue(
508 VkDevice _device,
509 uint32_t queueNodeIndex,
510 uint32_t queueIndex,
511 VkQueue* pQueue)
512 {
513 struct anv_device *device = (struct anv_device *) _device;
514 struct anv_queue *queue;
515
516 /* FIXME: Should allocate these at device create time. */
517
518 queue = anv_device_alloc(device, sizeof(*queue), 8,
519 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
520 if (queue == NULL)
521 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
522
523 queue->device = device;
524 queue->pool = &device->surface_state_pool;
525
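   /* completed_serial is a small GPU-visible allocation tracking the last
    * finished submission; with no_hw it is simply written from the CPU in
    * anv_QueueSubmit.
    */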
526 queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
527 *(uint32_t *)queue->completed_serial.map = 0;
528 queue->next_serial = 1;
529
530 *pQueue = (VkQueue) queue;
531
532 return VK_SUCCESS;
533 }
534
535 VkResult
536 anv_reloc_list_init(struct anv_reloc_list *list, struct anv_device *device)
537 {
538 list->num_relocs = 0;
539 list->array_length = 256;
540 list->relocs =
541 anv_device_alloc(device, list->array_length * sizeof(*list->relocs), 8,
542 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
543
544 if (list->relocs == NULL)
545 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
546
547 list->reloc_bos =
548 anv_device_alloc(device, list->array_length * sizeof(*list->reloc_bos), 8,
549 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
550
551    if (list->reloc_bos == NULL) {
552 anv_device_free(device, list->relocs);
553 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
554 }
555
556 return VK_SUCCESS;
557 }
558
559 void
560 anv_reloc_list_finish(struct anv_reloc_list *list, struct anv_device *device)
561 {
562 anv_device_free(device, list->relocs);
563 anv_device_free(device, list->reloc_bos);
564 }
565
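/* Ensure the relocation list has room for at least num_additional_relocs more
 * entries, doubling the backing arrays as needed.
 */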
566 static VkResult
567 anv_reloc_list_grow(struct anv_reloc_list *list, struct anv_device *device,
568 size_t num_additional_relocs)
569 {
570 if (list->num_relocs + num_additional_relocs <= list->array_length)
571 return VK_SUCCESS;
572
573 size_t new_length = list->array_length * 2;
574 while (new_length < list->num_relocs + num_additional_relocs)
575 new_length *= 2;
576
577 struct drm_i915_gem_relocation_entry *new_relocs =
578 anv_device_alloc(device, new_length * sizeof(*list->relocs), 8,
579 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
580 if (new_relocs == NULL)
581 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
582
583 struct anv_bo **new_reloc_bos =
584 anv_device_alloc(device, new_length * sizeof(*list->reloc_bos), 8,
585 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
586    if (new_reloc_bos == NULL) {
587 anv_device_free(device, new_relocs);
588 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
589 }
590
591 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
592 memcpy(new_reloc_bos, list->reloc_bos,
593 list->num_relocs * sizeof(*list->reloc_bos));
594
595 anv_device_free(device, list->relocs);
596 anv_device_free(device, list->reloc_bos);
597
598 list->relocs = new_relocs;
599 list->reloc_bos = new_reloc_bos;
600
601 return VK_SUCCESS;
602 }
603
604 static VkResult
605 anv_batch_bo_create(struct anv_device *device, struct anv_batch_bo **bbo_out)
606 {
607 VkResult result;
608
609 struct anv_batch_bo *bbo =
610 anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
611 if (bbo == NULL)
612 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
613
614 bbo->num_relocs = 0;
615 bbo->prev_batch_bo = NULL;
616
617 result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
618 if (result != VK_SUCCESS) {
619 anv_device_free(device, bbo);
620 return result;
621 }
622
623 *bbo_out = bbo;
624
625 return VK_SUCCESS;
626 }
627
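/* Point the batch at a fresh batch BO, reserving batch_padding bytes at the
 * end.  first_reloc records how many relocations already existed so that
 * anv_batch_bo_finish() can compute how many belong to this BO.
 */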
628 static void
629 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
630 size_t batch_padding)
631 {
632 batch->next = batch->start = bbo->bo.map;
633 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
634 bbo->first_reloc = batch->relocs.num_relocs;
635 }
636
637 static void
638 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
639 {
640 assert(batch->start == bbo->bo.map);
641 bbo->length = batch->next - batch->start;
642 bbo->num_relocs = batch->relocs.num_relocs - bbo->first_reloc;
643 }
644
645 static void
646 anv_batch_bo_destroy(struct anv_batch_bo *bbo, struct anv_device *device)
647 {
648 anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
649 anv_device_free(device, bbo);
650 }
651
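/* Reserve space for num_dwords dwords in the batch, growing it through the
 * extend callback if necessary, and return a pointer to the reserved space.
 * Callers usually go through the anv_batch_emit() macro rather than calling
 * this directly.
 */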
652 void *
653 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
654 {
655 if (batch->next + num_dwords * 4 > batch->end)
656 batch->extend_cb(batch, batch->user_data);
657
658 void *p = batch->next;
659
660 batch->next += num_dwords * 4;
661 assert(batch->next <= batch->end);
662
663 return p;
664 }
665
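/* Append all relocations from 'other' to 'list', adjusting each copied
 * entry's offset by 'offset' (the position at which the other batch was
 * copied into this one).
 */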
666 static void
667 anv_reloc_list_append(struct anv_reloc_list *list, struct anv_device *device,
668 struct anv_reloc_list *other, uint32_t offset)
669 {
670 anv_reloc_list_grow(list, device, other->num_relocs);
671 /* TODO: Handle failure */
672
673 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
674 other->num_relocs * sizeof(other->relocs[0]));
675 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
676 other->num_relocs * sizeof(other->reloc_bos[0]));
677
678 for (uint32_t i = 0; i < other->num_relocs; i++)
679 list->relocs[i + list->num_relocs].offset += offset;
680
681 list->num_relocs += other->num_relocs;
682 }
683
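/* Record a relocation at 'offset' pointing 'delta' bytes into target_bo and
 * return the presumed address; the value is written into the batch now and
 * fixed up by the kernel if the BO ends up somewhere else.
 */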
684 static uint64_t
685 anv_reloc_list_add(struct anv_reloc_list *list, struct anv_device *device,
686 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
687 {
688 struct drm_i915_gem_relocation_entry *entry;
689 int index;
690
691 anv_reloc_list_grow(list, device, 1);
692 /* TODO: Handle failure */
693
694 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
695 index = list->num_relocs++;
696 list->reloc_bos[index] = target_bo;
697 entry = &list->relocs[index];
698 entry->target_handle = target_bo->gem_handle;
699 entry->delta = delta;
700 entry->offset = offset;
701 entry->presumed_offset = target_bo->offset;
702 entry->read_domains = 0;
703 entry->write_domain = 0;
704
705 return target_bo->offset + delta;
706 }
707
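/* Copy the contents of 'other' into 'batch', growing the destination if
 * needed and rebasing the copied relocations to their new offsets.
 */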
708 void
709 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
710 {
711 uint32_t size, offset;
712
713 size = other->next - other->start;
714 assert(size % 4 == 0);
715
716 if (batch->next + size > batch->end)
717 batch->extend_cb(batch, batch->user_data);
718
719 assert(batch->next + size <= batch->end);
720
721 memcpy(batch->next, other->start, size);
722
723 offset = batch->next - batch->start;
724 anv_reloc_list_append(&batch->relocs, batch->device,
725 &other->relocs, offset);
726
727 batch->next += size;
728 }
729
730 uint64_t
731 anv_batch_emit_reloc(struct anv_batch *batch,
732 void *location, struct anv_bo *bo, uint32_t delta)
733 {
734 return anv_reloc_list_add(&batch->relocs, batch->device,
735 location - batch->start, bo, delta);
736 }
737
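/* Submit each command buffer with its own execbuf call, optionally followed
 * by the fence's execbuf, then copy the kernel-updated BO offsets back so
 * later submissions can use them as presumed offsets.  With no_hw we just
 * mark the command buffer's serial as completed.
 */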
738 VkResult anv_QueueSubmit(
739 VkQueue _queue,
740 uint32_t cmdBufferCount,
741 const VkCmdBuffer* pCmdBuffers,
742 VkFence _fence)
743 {
744 struct anv_queue *queue = (struct anv_queue *) _queue;
745 struct anv_device *device = queue->device;
746 struct anv_fence *fence = (struct anv_fence *) _fence;
747 int ret;
748
749 for (uint32_t i = 0; i < cmdBufferCount; i++) {
750 struct anv_cmd_buffer *cmd_buffer =
751 (struct anv_cmd_buffer *) pCmdBuffers[i];
752
753 if (device->dump_aub)
754 anv_cmd_buffer_dump(cmd_buffer);
755
756 if (!device->no_hw) {
757 ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf);
758 if (ret != 0)
759 return vk_error(VK_ERROR_UNKNOWN);
760
761 if (fence) {
762 ret = anv_gem_execbuffer(device, &fence->execbuf);
763 if (ret != 0)
764 return vk_error(VK_ERROR_UNKNOWN);
765 }
766
767 for (uint32_t i = 0; i < cmd_buffer->bo_count; i++)
768 cmd_buffer->exec2_bos[i]->offset = cmd_buffer->exec2_objects[i].offset;
769 } else {
770 *(uint32_t *)queue->completed_serial.map = cmd_buffer->serial;
771 }
772 }
773
774 return VK_SUCCESS;
775 }
776
777 VkResult anv_QueueAddMemReferences(
778 VkQueue queue,
779 uint32_t count,
780 const VkDeviceMemory* pMems)
781 {
782 return VK_SUCCESS;
783 }
784
785 VkResult anv_QueueRemoveMemReferences(
786 VkQueue queue,
787 uint32_t count,
788 const VkDeviceMemory* pMems)
789 {
790 return VK_SUCCESS;
791 }
792
793 VkResult anv_QueueWaitIdle(
794 VkQueue _queue)
795 {
796 struct anv_queue *queue = (struct anv_queue *) _queue;
797
798 return vkDeviceWaitIdle((VkDevice) queue->device);
799 }
800
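/* Implement device-wait-idle by submitting a tiny batch containing only
 * MI_BATCH_BUFFER_END on the render ring and then blocking on it with an
 * effectively infinite gem_wait timeout.
 */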
801 VkResult anv_DeviceWaitIdle(
802 VkDevice _device)
803 {
804 struct anv_device *device = (struct anv_device *) _device;
805 struct anv_state state;
806 struct anv_batch batch;
807 struct drm_i915_gem_execbuffer2 execbuf;
808 struct drm_i915_gem_exec_object2 exec2_objects[1];
809 struct anv_bo *bo = NULL;
810 VkResult result;
811 int64_t timeout;
812 int ret;
813
814 state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
815 bo = &device->dynamic_state_pool.block_pool->bo;
816 batch.start = batch.next = state.map;
817 batch.end = state.map + 32;
818 anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
819 anv_batch_emit(&batch, GEN8_MI_NOOP);
820
821 exec2_objects[0].handle = bo->gem_handle;
822 exec2_objects[0].relocation_count = 0;
823 exec2_objects[0].relocs_ptr = 0;
824 exec2_objects[0].alignment = 0;
825 exec2_objects[0].offset = bo->offset;
826 exec2_objects[0].flags = 0;
827 exec2_objects[0].rsvd1 = 0;
828 exec2_objects[0].rsvd2 = 0;
829
830 execbuf.buffers_ptr = (uintptr_t) exec2_objects;
831 execbuf.buffer_count = 1;
832 execbuf.batch_start_offset = state.offset;
833 execbuf.batch_len = batch.next - state.map;
834 execbuf.cliprects_ptr = 0;
835 execbuf.num_cliprects = 0;
836 execbuf.DR1 = 0;
837 execbuf.DR4 = 0;
838
839 execbuf.flags =
840 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
841 execbuf.rsvd1 = device->context_id;
842 execbuf.rsvd2 = 0;
843
844 if (!device->no_hw) {
845 ret = anv_gem_execbuffer(device, &execbuf);
846 if (ret != 0) {
847 result = vk_error(VK_ERROR_UNKNOWN);
848 goto fail;
849 }
850
851 timeout = INT64_MAX;
852 ret = anv_gem_wait(device, bo->gem_handle, &timeout);
853 if (ret != 0) {
854 result = vk_error(VK_ERROR_UNKNOWN);
855 goto fail;
856 }
857 }
858
859 anv_state_pool_free(&device->dynamic_state_pool, state);
860
861 return VK_SUCCESS;
862
863 fail:
864 anv_state_pool_free(&device->dynamic_state_pool, state);
865
866 return result;
867 }
868
869 void *
870 anv_device_alloc(struct anv_device * device,
871 size_t size,
872 size_t alignment,
873 VkSystemAllocType allocType)
874 {
875 return device->instance->pfnAlloc(device->instance->pAllocUserData,
876 size,
877 alignment,
878 allocType);
879 }
880
881 void
882 anv_device_free(struct anv_device * device,
883 void * mem)
884 {
885 return device->instance->pfnFree(device->instance->pAllocUserData,
886 mem);
887 }
888
889 VkResult
890 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
891 {
892 bo->gem_handle = anv_gem_create(device, size);
893 if (!bo->gem_handle)
894 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
895
896 bo->map = NULL;
897 bo->index = 0;
898 bo->offset = 0;
899 bo->size = size;
900
901 return VK_SUCCESS;
902 }
903
904 VkResult anv_AllocMemory(
905 VkDevice _device,
906 const VkMemoryAllocInfo* pAllocInfo,
907 VkDeviceMemory* pMem)
908 {
909 struct anv_device *device = (struct anv_device *) _device;
910 struct anv_device_memory *mem;
911 VkResult result;
912
913 assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
914
915 mem = anv_device_alloc(device, sizeof(*mem), 8,
916 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
917 if (mem == NULL)
918 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
919
920 result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
921 if (result != VK_SUCCESS)
922 goto fail;
923
924 *pMem = (VkDeviceMemory) mem;
925
926 return VK_SUCCESS;
927
928 fail:
929 anv_device_free(device, mem);
930
931 return result;
932 }
933
934 VkResult anv_FreeMemory(
935 VkDevice _device,
936 VkDeviceMemory _mem)
937 {
938 struct anv_device *device = (struct anv_device *) _device;
939 struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
940
941 if (mem->bo.map)
942 anv_gem_munmap(mem->bo.map, mem->bo.size);
943
944 if (mem->bo.gem_handle != 0)
945 anv_gem_close(device, mem->bo.gem_handle);
946
947 anv_device_free(device, mem);
948
949 return VK_SUCCESS;
950 }
951
952 VkResult anv_SetMemoryPriority(
953 VkDevice device,
954 VkDeviceMemory mem,
955 VkMemoryPriority priority)
956 {
957 return VK_SUCCESS;
958 }
959
960 VkResult anv_MapMemory(
961 VkDevice _device,
962 VkDeviceMemory _mem,
963 VkDeviceSize offset,
964 VkDeviceSize size,
965 VkMemoryMapFlags flags,
966 void** ppData)
967 {
968 struct anv_device *device = (struct anv_device *) _device;
969 struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
970
971 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
972 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
973 * at a time is valid. We could just mmap up front and return an offset
974 * pointer here, but that may exhaust virtual memory on 32 bit
975 * userspace. */
976
977 mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
978 mem->map_size = size;
979
980 *ppData = mem->map;
981
982 return VK_SUCCESS;
983 }
984
985 VkResult anv_UnmapMemory(
986 VkDevice _device,
987 VkDeviceMemory _mem)
988 {
989 struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
990
991 anv_gem_munmap(mem->map, mem->map_size);
992
993 return VK_SUCCESS;
994 }
995
996 VkResult anv_FlushMappedMemory(
997 VkDevice device,
998 VkDeviceMemory mem,
999 VkDeviceSize offset,
1000 VkDeviceSize size)
1001 {
1002 /* clflush here for !llc platforms */
1003
1004 return VK_SUCCESS;
1005 }
1006
1007 VkResult anv_PinSystemMemory(
1008 VkDevice device,
1009 const void* pSysMem,
1010 size_t memSize,
1011 VkDeviceMemory* pMem)
1012 {
1013 return VK_SUCCESS;
1014 }
1015
1016 VkResult anv_GetMultiDeviceCompatibility(
1017 VkPhysicalDevice physicalDevice0,
1018 VkPhysicalDevice physicalDevice1,
1019 VkPhysicalDeviceCompatibilityInfo* pInfo)
1020 {
1021 return VK_UNSUPPORTED;
1022 }
1023
1024 VkResult anv_OpenSharedMemory(
1025 VkDevice device,
1026 const VkMemoryOpenInfo* pOpenInfo,
1027 VkDeviceMemory* pMem)
1028 {
1029 return VK_UNSUPPORTED;
1030 }
1031
1032 VkResult anv_OpenSharedSemaphore(
1033 VkDevice device,
1034 const VkSemaphoreOpenInfo* pOpenInfo,
1035 VkSemaphore* pSemaphore)
1036 {
1037 return VK_UNSUPPORTED;
1038 }
1039
1040 VkResult anv_OpenPeerMemory(
1041 VkDevice device,
1042 const VkPeerMemoryOpenInfo* pOpenInfo,
1043 VkDeviceMemory* pMem)
1044 {
1045 return VK_UNSUPPORTED;
1046 }
1047
1048 VkResult anv_OpenPeerImage(
1049 VkDevice device,
1050 const VkPeerImageOpenInfo* pOpenInfo,
1051 VkImage* pImage,
1052 VkDeviceMemory* pMem)
1053 {
1054 return VK_UNSUPPORTED;
1055 }
1056
1057 VkResult anv_DestroyObject(
1058 VkDevice _device,
1059 VkObjectType objType,
1060 VkObject _object)
1061 {
1062 struct anv_device *device = (struct anv_device *) _device;
1063 struct anv_object *object = (struct anv_object *) _object;
1064
1065 switch (objType) {
1066 case VK_OBJECT_TYPE_INSTANCE:
1067 return anv_DestroyInstance((VkInstance) _object);
1068
1069 case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
1070 /* We don't want to actually destroy physical devices */
1071 return VK_SUCCESS;
1072
1073 case VK_OBJECT_TYPE_DEVICE:
1074 assert(_device == (VkDevice) _object);
1075 return anv_DestroyDevice((VkDevice) _object);
1076
1077 case VK_OBJECT_TYPE_QUEUE:
1078 /* TODO */
1079 return VK_SUCCESS;
1080
1081 case VK_OBJECT_TYPE_DEVICE_MEMORY:
1082 return anv_FreeMemory(_device, (VkDeviceMemory) _object);
1083
1084 case VK_OBJECT_TYPE_DESCRIPTOR_POOL:
1085       /* These are just dummies anyway, so we don't need to destroy them */
1086 return VK_SUCCESS;
1087
1088 case VK_OBJECT_TYPE_BUFFER:
1089 case VK_OBJECT_TYPE_BUFFER_VIEW:
1090 case VK_OBJECT_TYPE_IMAGE:
1091 case VK_OBJECT_TYPE_IMAGE_VIEW:
1092 case VK_OBJECT_TYPE_COLOR_ATTACHMENT_VIEW:
1093 case VK_OBJECT_TYPE_DEPTH_STENCIL_VIEW:
1094 case VK_OBJECT_TYPE_SHADER:
1095 case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
1096 case VK_OBJECT_TYPE_SAMPLER:
1097 case VK_OBJECT_TYPE_DESCRIPTOR_SET:
1098 case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
1099 case VK_OBJECT_TYPE_DYNAMIC_RS_STATE:
1100 case VK_OBJECT_TYPE_DYNAMIC_CB_STATE:
1101 case VK_OBJECT_TYPE_DYNAMIC_DS_STATE:
1102 case VK_OBJECT_TYPE_RENDER_PASS:
1103 /* These are trivially destroyable */
1104 anv_device_free(device, (void *) _object);
1105 return VK_SUCCESS;
1106
1107 case VK_OBJECT_TYPE_COMMAND_BUFFER:
1108 case VK_OBJECT_TYPE_PIPELINE:
1109 case VK_OBJECT_TYPE_DYNAMIC_VP_STATE:
1110 case VK_OBJECT_TYPE_FENCE:
1111 case VK_OBJECT_TYPE_QUERY_POOL:
1112 case VK_OBJECT_TYPE_FRAMEBUFFER:
1113 (object->destructor)(device, object, objType);
1114 return VK_SUCCESS;
1115
1116 case VK_OBJECT_TYPE_SEMAPHORE:
1117 case VK_OBJECT_TYPE_EVENT:
1118 stub_return(VK_UNSUPPORTED);
1119
1120 default:
1121 unreachable("Invalid object type");
1122 }
1123 }
1124
1125 static void
1126 fill_memory_requirements(
1127 VkObjectType objType,
1128 VkObject object,
1129 VkMemoryRequirements * memory_requirements)
1130 {
1131 struct anv_buffer *buffer;
1132 struct anv_image *image;
1133
1134 memory_requirements->memPropsAllowed =
1135 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1136 VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT |
1137 /* VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT | */
1138 VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT |
1139 VK_MEMORY_PROPERTY_PREFER_HOST_LOCAL |
1140 VK_MEMORY_PROPERTY_SHAREABLE_BIT;
1141
1142 memory_requirements->memPropsRequired = 0;
1143
1144 switch (objType) {
1145 case VK_OBJECT_TYPE_BUFFER:
1146 buffer = (struct anv_buffer *) object;
1147 memory_requirements->size = buffer->size;
1148 memory_requirements->alignment = 16;
1149 break;
1150 case VK_OBJECT_TYPE_IMAGE:
1151 image = (struct anv_image *) object;
1152 memory_requirements->size = image->size;
1153 memory_requirements->alignment = image->alignment;
1154 break;
1155 default:
1156 memory_requirements->size = 0;
1157 break;
1158 }
1159 }
1160
1161 static uint32_t
1162 get_allocation_count(VkObjectType objType)
1163 {
1164 switch (objType) {
1165 case VK_OBJECT_TYPE_BUFFER:
1166 case VK_OBJECT_TYPE_IMAGE:
1167 return 1;
1168 default:
1169 return 0;
1170 }
1171 }
1172
1173 VkResult anv_GetObjectInfo(
1174 VkDevice _device,
1175 VkObjectType objType,
1176 VkObject object,
1177 VkObjectInfoType infoType,
1178 size_t* pDataSize,
1179 void* pData)
1180 {
1181 VkMemoryRequirements memory_requirements;
1182 uint32_t *count;
1183
1184 switch (infoType) {
1185 case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
1186 *pDataSize = sizeof(memory_requirements);
1187 if (pData == NULL)
1188 return VK_SUCCESS;
1189
1190 fill_memory_requirements(objType, object, pData);
1191 return VK_SUCCESS;
1192
1193 case VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
1194       *pDataSize = sizeof(*count);
1195 if (pData == NULL)
1196 return VK_SUCCESS;
1197
1198 count = pData;
1199 *count = get_allocation_count(objType);
1200 return VK_SUCCESS;
1201
1202 default:
1203 return VK_UNSUPPORTED;
1204 }
1205
1206 }
1207
1208 VkResult anv_QueueBindObjectMemory(
1209 VkQueue queue,
1210 VkObjectType objType,
1211 VkObject object,
1212 uint32_t allocationIdx,
1213 VkDeviceMemory _mem,
1214 VkDeviceSize memOffset)
1215 {
1216 struct anv_buffer *buffer;
1217 struct anv_image *image;
1218 struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
1219
1220 switch (objType) {
1221 case VK_OBJECT_TYPE_BUFFER:
1222 buffer = (struct anv_buffer *) object;
1223 buffer->bo = &mem->bo;
1224 buffer->offset = memOffset;
1225 break;
1226 case VK_OBJECT_TYPE_IMAGE:
1227 image = (struct anv_image *) object;
1228 image->bo = &mem->bo;
1229 image->offset = memOffset;
1230 break;
1231 default:
1232 break;
1233 }
1234
1235 return VK_SUCCESS;
1236 }
1237
1238 VkResult anv_QueueBindObjectMemoryRange(
1239 VkQueue queue,
1240 VkObjectType objType,
1241 VkObject object,
1242 uint32_t allocationIdx,
1243 VkDeviceSize rangeOffset,
1244 VkDeviceSize rangeSize,
1245 VkDeviceMemory mem,
1246 VkDeviceSize memOffset)
1247 {
1248 stub_return(VK_UNSUPPORTED);
1249 }
1250
1251 VkResult anv_QueueBindImageMemoryRange(
1252 VkQueue queue,
1253 VkImage image,
1254 uint32_t allocationIdx,
1255 const VkImageMemoryBindInfo* pBindInfo,
1256 VkDeviceMemory mem,
1257 VkDeviceSize memOffset)
1258 {
1259 stub_return(VK_UNSUPPORTED);
1260 }
1261
1262 static void
1263 anv_fence_destroy(struct anv_device *device,
1264 struct anv_object *object,
1265 VkObjectType obj_type)
1266 {
1267 struct anv_fence *fence = (struct anv_fence *) object;
1268
1269 assert(obj_type == VK_OBJECT_TYPE_FENCE);
1270
1271 anv_gem_munmap(fence->bo.map, fence->bo.size);
1272 anv_gem_close(device, fence->bo.gem_handle);
1273 anv_device_free(device, fence);
1274 }
1275
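/* A fence is implemented as a minimal batch (just MI_BATCH_BUFFER_END) with
 * its own execbuf; anv_QueueSubmit submits it right after the command
 * buffers, so waiting on the fence BO waits for everything queued before it
 * on the same context.
 */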
1276 VkResult anv_CreateFence(
1277 VkDevice _device,
1278 const VkFenceCreateInfo* pCreateInfo,
1279 VkFence* pFence)
1280 {
1281 struct anv_device *device = (struct anv_device *) _device;
1282 struct anv_fence *fence;
1283 struct anv_batch batch;
1284 VkResult result;
1285
1286 const uint32_t fence_size = 128;
1287
1288 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1289
1290 fence = anv_device_alloc(device, sizeof(*fence), 8,
1291 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1292 if (fence == NULL)
1293 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1294
1295 result = anv_bo_init_new(&fence->bo, device, fence_size);
1296 if (result != VK_SUCCESS)
1297 goto fail;
1298
1299 fence->base.destructor = anv_fence_destroy;
1300
1301 fence->bo.map =
1302 anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
1303 batch.next = batch.start = fence->bo.map;
1304 batch.end = fence->bo.map + fence->bo.size;
1305 anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
1306 anv_batch_emit(&batch, GEN8_MI_NOOP);
1307
1308 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1309 fence->exec2_objects[0].relocation_count = 0;
1310 fence->exec2_objects[0].relocs_ptr = 0;
1311 fence->exec2_objects[0].alignment = 0;
1312 fence->exec2_objects[0].offset = fence->bo.offset;
1313 fence->exec2_objects[0].flags = 0;
1314 fence->exec2_objects[0].rsvd1 = 0;
1315 fence->exec2_objects[0].rsvd2 = 0;
1316
1317 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1318 fence->execbuf.buffer_count = 1;
1319 fence->execbuf.batch_start_offset = 0;
1320 fence->execbuf.batch_len = batch.next - fence->bo.map;
1321 fence->execbuf.cliprects_ptr = 0;
1322 fence->execbuf.num_cliprects = 0;
1323 fence->execbuf.DR1 = 0;
1324 fence->execbuf.DR4 = 0;
1325
1326 fence->execbuf.flags =
1327 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1328 fence->execbuf.rsvd1 = device->context_id;
1329 fence->execbuf.rsvd2 = 0;
1330
1331    *pFence = (VkFence) fence;
1332
1333 return VK_SUCCESS;
1334
1335 fail:
1336 anv_device_free(device, fence);
1337
1338 return result;
1339 }
1340
1341 VkResult anv_ResetFences(
1342 VkDevice _device,
1343 uint32_t fenceCount,
1344 VkFence* pFences)
1345 {
1346 struct anv_fence **fences = (struct anv_fence **) pFences;
1347
1348    for (uint32_t i = 0; i < fenceCount; i++)
1349 fences[i]->ready = false;
1350
1351 return VK_SUCCESS;
1352 }
1353
1354 VkResult anv_GetFenceStatus(
1355 VkDevice _device,
1356 VkFence _fence)
1357 {
1358 struct anv_device *device = (struct anv_device *) _device;
1359 struct anv_fence *fence = (struct anv_fence *) _fence;
1360 int64_t t = 0;
1361 int ret;
1362
1363 if (fence->ready)
1364 return VK_SUCCESS;
1365
1366 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1367 if (ret == 0) {
1368 fence->ready = true;
1369 return VK_SUCCESS;
1370 }
1371
1372 return VK_NOT_READY;
1373 }
1374
1375 VkResult anv_WaitForFences(
1376 VkDevice _device,
1377 uint32_t fenceCount,
1378 const VkFence* pFences,
1379 bool32_t waitAll,
1380 uint64_t timeout)
1381 {
1382 struct anv_device *device = (struct anv_device *) _device;
1383 struct anv_fence **fences = (struct anv_fence **) pFences;
1384 int64_t t = timeout;
1385 int ret;
1386
1387 /* FIXME: handle !waitAll */
1388
1389 for (uint32_t i = 0; i < fenceCount; i++) {
1390 ret = anv_gem_wait(device, fences[i]->bo.gem_handle, &t);
1391 if (ret == -1 && errno == ETIME)
1392 return VK_TIMEOUT;
1393 else if (ret == -1)
1394 return vk_error(VK_ERROR_UNKNOWN);
1395 }
1396
1397 return VK_SUCCESS;
1398 }
1399
1400 // Queue semaphore functions
1401
1402 VkResult anv_CreateSemaphore(
1403 VkDevice device,
1404 const VkSemaphoreCreateInfo* pCreateInfo,
1405 VkSemaphore* pSemaphore)
1406 {
1407 stub_return(VK_UNSUPPORTED);
1408 }
1409
1410 VkResult anv_QueueSignalSemaphore(
1411 VkQueue queue,
1412 VkSemaphore semaphore)
1413 {
1414 stub_return(VK_UNSUPPORTED);
1415 }
1416
1417 VkResult anv_QueueWaitSemaphore(
1418 VkQueue queue,
1419 VkSemaphore semaphore)
1420 {
1421 stub_return(VK_UNSUPPORTED);
1422 }
1423
1424 // Event functions
1425
1426 VkResult anv_CreateEvent(
1427 VkDevice device,
1428 const VkEventCreateInfo* pCreateInfo,
1429 VkEvent* pEvent)
1430 {
1431 stub_return(VK_UNSUPPORTED);
1432 }
1433
1434 VkResult anv_GetEventStatus(
1435 VkDevice device,
1436 VkEvent event)
1437 {
1438 stub_return(VK_UNSUPPORTED);
1439 }
1440
1441 VkResult anv_SetEvent(
1442 VkDevice device,
1443 VkEvent event)
1444 {
1445 stub_return(VK_UNSUPPORTED);
1446 }
1447
1448 VkResult anv_ResetEvent(
1449 VkDevice device,
1450 VkEvent event)
1451 {
1452 stub_return(VK_UNSUPPORTED);
1453 }
1454
1455 // Buffer functions
1456
1457 VkResult anv_CreateBuffer(
1458 VkDevice _device,
1459 const VkBufferCreateInfo* pCreateInfo,
1460 VkBuffer* pBuffer)
1461 {
1462 struct anv_device *device = (struct anv_device *) _device;
1463 struct anv_buffer *buffer;
1464
1465 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1466
1467 buffer = anv_device_alloc(device, sizeof(*buffer), 8,
1468 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1469 if (buffer == NULL)
1470 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1471
1472 buffer->size = pCreateInfo->size;
1473 buffer->bo = NULL;
1474 buffer->offset = 0;
1475
1476 *pBuffer = (VkBuffer) buffer;
1477
1478 return VK_SUCCESS;
1479 }
1480
1481 // Buffer view functions
1482
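/* Fill out a RENDER_SURFACE_STATE for a buffer view.  For SURFTYPE_BUFFER the
 * element count is split across the Width (bits 0-6), Height (bits 7-20) and
 * Depth (bits 21-26) fields.
 */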
1483 static void
1484 fill_buffer_surface_state(void *state, VkFormat format,
1485 uint32_t offset, uint32_t range)
1486 {
1487 const struct anv_format *info;
1488
1489 info = anv_format_for_vk_format(format);
1490 /* This assumes RGBA float format. */
1491 uint32_t stride = 4;
1492 uint32_t num_elements = range / stride;
1493
1494 struct GEN8_RENDER_SURFACE_STATE surface_state = {
1495 .SurfaceType = SURFTYPE_BUFFER,
1496 .SurfaceArray = false,
1497 .SurfaceFormat = info->format,
1498 .SurfaceVerticalAlignment = VALIGN4,
1499 .SurfaceHorizontalAlignment = HALIGN4,
1500 .TileMode = LINEAR,
1501 .VerticalLineStride = 0,
1502 .VerticalLineStrideOffset = 0,
1503 .SamplerL2BypassModeDisable = true,
1504 .RenderCacheReadWriteMode = WriteOnlyCache,
1505 .MemoryObjectControlState = GEN8_MOCS,
1506 .BaseMipLevel = 0,
1507 .SurfaceQPitch = 0,
1508 .Height = (num_elements >> 7) & 0x3fff,
1509 .Width = num_elements & 0x7f,
1510 .Depth = (num_elements >> 21) & 0x3f,
1511 .SurfacePitch = stride - 1,
1512 .MinimumArrayElement = 0,
1513 .NumberofMultisamples = MULTISAMPLECOUNT_1,
1514 .XOffset = 0,
1515 .YOffset = 0,
1516 .SurfaceMinLOD = 0,
1517 .MIPCountLOD = 0,
1518 .AuxiliarySurfaceMode = AUX_NONE,
1519 .RedClearColor = 0,
1520 .GreenClearColor = 0,
1521 .BlueClearColor = 0,
1522 .AlphaClearColor = 0,
1523 .ShaderChannelSelectRed = SCS_RED,
1524 .ShaderChannelSelectGreen = SCS_GREEN,
1525 .ShaderChannelSelectBlue = SCS_BLUE,
1526 .ShaderChannelSelectAlpha = SCS_ALPHA,
1527 .ResourceMinLOD = 0,
1528 /* FIXME: We assume that the image must be bound at this time. */
1529 .SurfaceBaseAddress = { NULL, offset },
1530 };
1531
1532 GEN8_RENDER_SURFACE_STATE_pack(NULL, state, &surface_state);
1533 }
1534
1535 VkResult anv_CreateBufferView(
1536 VkDevice _device,
1537 const VkBufferViewCreateInfo* pCreateInfo,
1538 VkBufferView* pView)
1539 {
1540 struct anv_device *device = (struct anv_device *) _device;
1541 struct anv_buffer *buffer = (struct anv_buffer *) pCreateInfo->buffer;
1542 struct anv_surface_view *view;
1543
1544 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
1545
1546 view = anv_device_alloc(device, sizeof(*view), 8,
1547 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1548 if (view == NULL)
1549 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1550
1551 view->bo = buffer->bo;
1552 view->offset = buffer->offset + pCreateInfo->offset;
1553 view->surface_state =
1554 anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
1555 view->format = pCreateInfo->format;
1556 view->range = pCreateInfo->range;
1557
1558 fill_buffer_surface_state(view->surface_state.map,
1559 pCreateInfo->format, view->offset, pCreateInfo->range);
1560
1561    *pView = (VkBufferView) view;
1562
1563 return VK_SUCCESS;
1564 }
1565
1566 // Sampler functions
1567
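/* Translate a VkSamplerCreateInfo into a packed GEN8_SAMPLER_STATE.  LOD bias
 * and clamps are converted to the hardware's fixed-point format with 8
 * fractional bits (hence the * 256), and the border color is referenced
 * indirectly through the table set up in anv_device_init_border_colors().
 */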
1568 VkResult anv_CreateSampler(
1569 VkDevice _device,
1570 const VkSamplerCreateInfo* pCreateInfo,
1571 VkSampler* pSampler)
1572 {
1573 struct anv_device *device = (struct anv_device *) _device;
1574 struct anv_sampler *sampler;
1575 uint32_t mag_filter, min_filter, max_anisotropy;
1576
1577 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1578
1579 sampler = anv_device_alloc(device, sizeof(*sampler), 8,
1580 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1581 if (!sampler)
1582 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1583
1584 static const uint32_t vk_to_gen_tex_filter[] = {
1585 [VK_TEX_FILTER_NEAREST] = MAPFILTER_NEAREST,
1586 [VK_TEX_FILTER_LINEAR] = MAPFILTER_LINEAR
1587 };
1588
1589 static const uint32_t vk_to_gen_mipmap_mode[] = {
1590 [VK_TEX_MIPMAP_MODE_BASE] = MIPFILTER_NONE,
1591 [VK_TEX_MIPMAP_MODE_NEAREST] = MIPFILTER_NEAREST,
1592 [VK_TEX_MIPMAP_MODE_LINEAR] = MIPFILTER_LINEAR
1593 };
1594
1595 static const uint32_t vk_to_gen_tex_address[] = {
1596 [VK_TEX_ADDRESS_WRAP] = TCM_WRAP,
1597 [VK_TEX_ADDRESS_MIRROR] = TCM_MIRROR,
1598 [VK_TEX_ADDRESS_CLAMP] = TCM_CLAMP,
1599 [VK_TEX_ADDRESS_MIRROR_ONCE] = TCM_MIRROR_ONCE,
1600 [VK_TEX_ADDRESS_CLAMP_BORDER] = TCM_CLAMP_BORDER,
1601 };
1602
1603 static const uint32_t vk_to_gen_compare_op[] = {
1604 [VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER,
1605 [VK_COMPARE_OP_LESS] = PREFILTEROPLESS,
1606 [VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL,
1607 [VK_COMPARE_OP_LESS_EQUAL] = PREFILTEROPLEQUAL,
1608 [VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER,
1609 [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL,
1610 [VK_COMPARE_OP_GREATER_EQUAL] = PREFILTEROPGEQUAL,
1611 [VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS,
1612 };
1613
1614 if (pCreateInfo->maxAnisotropy > 1) {
1615 mag_filter = MAPFILTER_ANISOTROPIC;
1616 min_filter = MAPFILTER_ANISOTROPIC;
1617 max_anisotropy = (pCreateInfo->maxAnisotropy - 2) / 2;
1618 } else {
1619 mag_filter = vk_to_gen_tex_filter[pCreateInfo->magFilter];
1620 min_filter = vk_to_gen_tex_filter[pCreateInfo->minFilter];
1621 max_anisotropy = RATIO21;
1622 }
1623
1624 struct GEN8_SAMPLER_STATE sampler_state = {
1625 .SamplerDisable = false,
1626 .TextureBorderColorMode = DX10OGL,
1627 .LODPreClampMode = 0,
1628 .BaseMipLevel = 0,
1629 .MipModeFilter = vk_to_gen_mipmap_mode[pCreateInfo->mipMode],
1630 .MagModeFilter = mag_filter,
1631 .MinModeFilter = min_filter,
1632 .TextureLODBias = pCreateInfo->mipLodBias * 256,
1633 .AnisotropicAlgorithm = EWAApproximation,
1634 .MinLOD = pCreateInfo->minLod * 256,
1635 .MaxLOD = pCreateInfo->maxLod * 256,
1636 .ChromaKeyEnable = 0,
1637 .ChromaKeyIndex = 0,
1638 .ChromaKeyMode = 0,
1639 .ShadowFunction = vk_to_gen_compare_op[pCreateInfo->compareOp],
1640 .CubeSurfaceControlMode = 0,
1641
1642 .IndirectStatePointer =
1643 device->float_border_colors.offset +
1644 pCreateInfo->borderColor * sizeof(float) * 4,
1645
1646 .LODClampMagnificationMode = MIPNONE,
1647 .MaximumAnisotropy = max_anisotropy,
1648 .RAddressMinFilterRoundingEnable = 0,
1649 .RAddressMagFilterRoundingEnable = 0,
1650 .VAddressMinFilterRoundingEnable = 0,
1651 .VAddressMagFilterRoundingEnable = 0,
1652 .UAddressMinFilterRoundingEnable = 0,
1653 .UAddressMagFilterRoundingEnable = 0,
1654 .TrilinearFilterQuality = 0,
1655 .NonnormalizedCoordinateEnable = 0,
1656 .TCXAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressU],
1657 .TCYAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressV],
1658 .TCZAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressW],
1659 };
1660
1661 GEN8_SAMPLER_STATE_pack(NULL, sampler->state, &sampler_state);
1662
1663 *pSampler = (VkSampler) sampler;
1664
1665 return VK_SUCCESS;
1666 }
1667
1668 // Descriptor set functions
1669
1670 VkResult anv_CreateDescriptorSetLayout(
1671 VkDevice _device,
1672 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
1673 VkDescriptorSetLayout* pSetLayout)
1674 {
1675 struct anv_device *device = (struct anv_device *) _device;
1676 struct anv_descriptor_set_layout *set_layout;
1677
1678 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
1679
1680 uint32_t sampler_count[VK_NUM_SHADER_STAGE] = { 0, };
1681 uint32_t surface_count[VK_NUM_SHADER_STAGE] = { 0, };
1682 uint32_t num_dynamic_buffers = 0;
1683 uint32_t count = 0;
1684 uint32_t stages = 0;
1685 uint32_t s;
1686
1687 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1688 switch (pCreateInfo->pBinding[i].descriptorType) {
1689 case VK_DESCRIPTOR_TYPE_SAMPLER:
1690 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1691 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1692 sampler_count[s] += pCreateInfo->pBinding[i].count;
1693 break;
1694 default:
1695 break;
1696 }
1697
1698 switch (pCreateInfo->pBinding[i].descriptorType) {
1699 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1700 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1701 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1702 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1703 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1704 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1705 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1706 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1707 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1708 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1709 surface_count[s] += pCreateInfo->pBinding[i].count;
1710 break;
1711 default:
1712 break;
1713 }
1714
1715 switch (pCreateInfo->pBinding[i].descriptorType) {
1716 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1717 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1718 num_dynamic_buffers += pCreateInfo->pBinding[i].count;
1719 break;
1720 default:
1721 break;
1722 }
1723
1724 stages |= pCreateInfo->pBinding[i].stageFlags;
1725 count += pCreateInfo->pBinding[i].count;
1726 }
1727
1728 uint32_t sampler_total = 0;
1729 uint32_t surface_total = 0;
1730 for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
1731 sampler_total += sampler_count[s];
1732 surface_total += surface_count[s];
1733 }
1734
1735 size_t size = sizeof(*set_layout) +
1736 (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
1737 set_layout = anv_device_alloc(device, size, 8,
1738 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1739 if (!set_layout)
1740 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1741
1742 set_layout->num_dynamic_buffers = num_dynamic_buffers;
1743 set_layout->count = count;
1744 set_layout->shader_stages = stages;
1745
1746 struct anv_descriptor_slot *p = set_layout->entries;
1747 struct anv_descriptor_slot *sampler[VK_NUM_SHADER_STAGE];
1748 struct anv_descriptor_slot *surface[VK_NUM_SHADER_STAGE];
1749 for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
1750 set_layout->stage[s].surface_count = surface_count[s];
1751 set_layout->stage[s].surface_start = surface[s] = p;
1752 p += surface_count[s];
1753 set_layout->stage[s].sampler_count = sampler_count[s];
1754 set_layout->stage[s].sampler_start = sampler[s] = p;
1755 p += sampler_count[s];
1756 }
1757
1758 uint32_t descriptor = 0;
1759 int8_t dynamic_slot = 0;
1760 bool is_dynamic;
1761 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1762 switch (pCreateInfo->pBinding[i].descriptorType) {
1763 case VK_DESCRIPTOR_TYPE_SAMPLER:
1764 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1765 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1766 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++) {
1767 sampler[s]->index = descriptor + j;
1768 sampler[s]->dynamic_slot = -1;
1769 sampler[s]++;
1770 }
1771 break;
1772 default:
1773 break;
1774 }
1775
1776 switch (pCreateInfo->pBinding[i].descriptorType) {
1777 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1778 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1779 is_dynamic = true;
1780 break;
1781 default:
1782 is_dynamic = false;
1783 break;
1784 }
1785
1786 switch (pCreateInfo->pBinding[i].descriptorType) {
1787 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1788 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1789 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1790 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1791 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1792 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1793 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1794 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1795 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1796 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1797 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++) {
1798 surface[s]->index = descriptor + j;
1799 if (is_dynamic)
1800 surface[s]->dynamic_slot = dynamic_slot + j;
1801 else
1802 surface[s]->dynamic_slot = -1;
1803 surface[s]++;
1804 }
1805 break;
1806 default:
1807 break;
1808 }
1809
1810 if (is_dynamic)
1811 dynamic_slot += pCreateInfo->pBinding[i].count;
1812
1813 descriptor += pCreateInfo->pBinding[i].count;
1814 }
1815
1816 *pSetLayout = (VkDescriptorSetLayout) set_layout;
1817
1818 return VK_SUCCESS;
1819 }
1820
1821 VkResult anv_BeginDescriptorPoolUpdate(
1822 VkDevice device,
1823 VkDescriptorUpdateMode updateMode)
1824 {
1825 return VK_SUCCESS;
1826 }
1827
1828 VkResult anv_EndDescriptorPoolUpdate(
1829 VkDevice device,
1830 VkCmdBuffer cmd)
1831 {
1832 return VK_SUCCESS;
1833 }
1834
1835 VkResult anv_CreateDescriptorPool(
1836 VkDevice device,
1837 VkDescriptorPoolUsage poolUsage,
1838 uint32_t maxSets,
1839 const VkDescriptorPoolCreateInfo* pCreateInfo,
1840 VkDescriptorPool* pDescriptorPool)
1841 {
1842 *pDescriptorPool = 1;
1843
1844 return VK_SUCCESS;
1845 }
1846
1847 VkResult anv_ResetDescriptorPool(
1848 VkDevice device,
1849 VkDescriptorPool descriptorPool)
1850 {
1851 return VK_SUCCESS;
1852 }
1853
1854 VkResult anv_AllocDescriptorSets(
1855 VkDevice _device,
1856 VkDescriptorPool descriptorPool,
1857 VkDescriptorSetUsage setUsage,
1858 uint32_t count,
1859 const VkDescriptorSetLayout* pSetLayouts,
1860 VkDescriptorSet* pDescriptorSets,
1861 uint32_t* pCount)
1862 {
1863 struct anv_device *device = (struct anv_device *) _device;
1864 const struct anv_descriptor_set_layout *layout;
1865 struct anv_descriptor_set *set;
1866 size_t size;
1867
1868 for (uint32_t i = 0; i < count; i++) {
1869 layout = (struct anv_descriptor_set_layout *) pSetLayouts[i];
1870 size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
1871 set = anv_device_alloc(device, size, 8,
1872 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1873 if (!set) {
1874 *pCount = i;
1875 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1876 }
1877
1878 /* Descriptor sets may not be 100% filled out so we need to memset to
1879 * ensure that we can properly detect and handle holes.
1880 */
1881 memset(set, 0, size);
1882
1883 pDescriptorSets[i] = (VkDescriptorSet) set;
1884 }
1885
1886 *pCount = count;
1887
1888 return VK_SUCCESS;
1889 }
1890
1891 void anv_ClearDescriptorSets(
1892 VkDevice device,
1893 VkDescriptorPool descriptorPool,
1894 uint32_t count,
1895 const VkDescriptorSet* pDescriptorSets)
1896 {
1897 }
1898
1899 void anv_UpdateDescriptors(
1900 VkDevice _device,
1901 VkDescriptorSet descriptorSet,
1902 uint32_t updateCount,
1903 const void** ppUpdateArray)
1904 {
1905 struct anv_descriptor_set *set = (struct anv_descriptor_set *) descriptorSet;
1906 VkUpdateSamplers *update_samplers;
1907 VkUpdateSamplerTextures *update_sampler_textures;
1908 VkUpdateImages *update_images;
1909 VkUpdateBuffers *update_buffers;
1910 VkUpdateAsCopy *update_as_copy;
1911
1912 for (uint32_t i = 0; i < updateCount; i++) {
1913 const struct anv_common *common = ppUpdateArray[i];
1914
1915 switch (common->sType) {
1916 case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
1917 update_samplers = (VkUpdateSamplers *) common;
1918
1919 for (uint32_t j = 0; j < update_samplers->count; j++) {
1920 set->descriptors[update_samplers->binding + j].sampler =
1921 (struct anv_sampler *) update_samplers->pSamplers[j];
1922 }
1923 break;
1924
1925 case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
1926 /* FIXME: Shouldn't this be *_UPDATE_SAMPLER_IMAGES? */
1927 update_sampler_textures = (VkUpdateSamplerTextures *) common;
1928
1929 for (uint32_t j = 0; j < update_sampler_textures->count; j++) {
1930 set->descriptors[update_sampler_textures->binding + j].view =
1931 (struct anv_surface_view *)
1932 update_sampler_textures->pSamplerImageViews[j].pImageView->view;
1933 set->descriptors[update_sampler_textures->binding + j].sampler =
1934 (struct anv_sampler *)
1935 update_sampler_textures->pSamplerImageViews[j].sampler;
1936 }
1937 break;
1938
1939 case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
1940 update_images = (VkUpdateImages *) common;
1941
1942 for (uint32_t j = 0; j < update_images->count; j++) {
1943 set->descriptors[update_images->binding + j].view =
1944 (struct anv_surface_view *) update_images->pImageViews[j].view;
1945 }
1946 break;
1947
1948 case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
1949 update_buffers = (VkUpdateBuffers *) common;
1950
1951 for (uint32_t j = 0; j < update_buffers->count; j++) {
1952 set->descriptors[update_buffers->binding + j].view =
1953 (struct anv_surface_view *) update_buffers->pBufferViews[j].view;
1954 }
1955 /* FIXME: descriptor arrays? */
1956 break;
1957
1958 case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
1959 update_as_copy = (VkUpdateAsCopy *) common;
1960 (void) update_as_copy;
1961 break;
1962
1963 default:
1964 break;
1965 }
1966 }
1967 }
1968
1969 // State object functions
1970
1971 static inline int64_t
1972 clamp_int64(int64_t x, int64_t min, int64_t max)
1973 {
1974 if (x < min)
1975 return min;
1976 else if (x < max)
1977 return x;
1978 else
1979 return max;
1980 }
1981
1982 static void
1983 anv_dynamic_vp_state_destroy(struct anv_device *device,
1984 struct anv_object *object,
1985 VkObjectType obj_type)
1986 {
1987 struct anv_dynamic_vp_state *state = (void *)object;
1988
1989 assert(obj_type == VK_OBJECT_TYPE_DYNAMIC_VP_STATE);
1990
1991 anv_state_pool_free(&device->dynamic_state_pool, state->sf_clip_vp);
1992 anv_state_pool_free(&device->dynamic_state_pool, state->cc_vp);
1993 anv_state_pool_free(&device->dynamic_state_pool, state->scissor);
1994
1995 anv_device_free(device, state);
1996 }
1997
1998 VkResult anv_CreateDynamicViewportState(
1999 VkDevice _device,
2000 const VkDynamicVpStateCreateInfo* pCreateInfo,
2001 VkDynamicVpState* pState)
2002 {
2003 struct anv_device *device = (struct anv_device *) _device;
2004 struct anv_dynamic_vp_state *state;
2005
2006 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);
2007
2008 state = anv_device_alloc(device, sizeof(*state), 8,
2009 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2010 if (state == NULL)
2011 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2012
2013 state->base.destructor = anv_dynamic_vp_state_destroy;
2014
2015 unsigned count = pCreateInfo->viewportAndScissorCount;
2016 state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
2017 count * 64, 64);
2018 state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
2019 count * 8, 32);
2020 state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
2021 count * 32, 32);
2022
2023 for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
2024 const VkViewport *vp = &pCreateInfo->pViewports[i];
2025 const VkRect *s = &pCreateInfo->pScissors[i];
2026
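      /* The viewport matrix below maps NDC to window coordinates: m00 scales
       * the [-1,1] x range by width/2 and m30 translates it so the result
       * spans [originX, originX + width]; y and z work the same way with
       * height and the depth range.  As an illustrative example, a 0,0
       * 1920x1080 viewport gives m00 = 960 and m30 = 960, so x_ndc = -1 maps
       * to 0 and x_ndc = 1 maps to 1920.
       */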
2027 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
2028 .ViewportMatrixElementm00 = vp->width / 2,
2029 .ViewportMatrixElementm11 = vp->height / 2,
2030 .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
2031 .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
2032 .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
2033 .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
2034 .XMinClipGuardband = -1.0f,
2035 .XMaxClipGuardband = 1.0f,
2036 .YMinClipGuardband = -1.0f,
2037 .YMaxClipGuardband = 1.0f,
2038 .XMinViewPort = vp->originX,
2039 .XMaxViewPort = vp->originX + vp->width - 1,
2040 .YMinViewPort = vp->originY,
2041 .YMaxViewPort = vp->originY + vp->height - 1,
2042 };
2043
2044 struct GEN8_CC_VIEWPORT cc_viewport = {
2045 .MinimumDepth = vp->minDepth,
2046 .MaximumDepth = vp->maxDepth
2047 };
2048
2049 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
2050           * ymax < ymin for empty clips. In case clip x, y, width, and height are all
2051 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
2052 * what we want. Just special case empty clips and produce a canonical
2053 * empty clip. */
2054 static const struct GEN8_SCISSOR_RECT empty_scissor = {
2055 .ScissorRectangleYMin = 1,
2056 .ScissorRectangleXMin = 1,
2057 .ScissorRectangleYMax = 0,
2058 .ScissorRectangleXMax = 0
2059 };
2060
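      /* Worked example of the clamping below: a scissor with offset.x = -10
       * clamps to an XMin of 0, and offset.x + extent.width - 1 = 100000
       * clamps to the hardware maximum of 0xffff.  Doing the additions as
       * 64-bit values is what keeps the intermediate result from wrapping
       * before the clamp.
       */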
2061 const int max = 0xffff;
2062 struct GEN8_SCISSOR_RECT scissor = {
2063 /* Do this math using int64_t so overflow gets clamped correctly. */
2064 .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
2065 .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
2066 .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
2067 .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
2068 };
2069
2070 GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
2071 GEN8_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 32, &cc_viewport);
2072
2073 if (s->extent.width <= 0 || s->extent.height <= 0) {
2074 GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
2075 } else {
2076 GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
2077 }
2078 }
2079
2080 *pState = (VkDynamicVpState) state;
2081
2082 return VK_SUCCESS;
2083 }
2084
2085 VkResult anv_CreateDynamicRasterState(
2086 VkDevice _device,
2087 const VkDynamicRsStateCreateInfo* pCreateInfo,
2088 VkDynamicRsState* pState)
2089 {
2090 struct anv_device *device = (struct anv_device *) _device;
2091 struct anv_dynamic_rs_state *state;
2092
2093 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);
2094
2095 state = anv_device_alloc(device, sizeof(*state), 8,
2096 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2097 if (state == NULL)
2098 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2099
2100 /* Missing these:
2101 * float pointFadeThreshold;
2102 * // optional (GL45) - Size of point fade threshold
2103 */
2104
2105 struct GEN8_3DSTATE_SF sf = {
2106 GEN8_3DSTATE_SF_header,
2107 .LineWidth = pCreateInfo->lineWidth,
2108 .PointWidth = pCreateInfo->pointSize,
2109 };
2110
2111 GEN8_3DSTATE_SF_pack(NULL, state->state_sf, &sf);
2112
2113 bool enable_bias = pCreateInfo->depthBias != 0.0f ||
2114 pCreateInfo->slopeScaledDepthBias != 0.0f;
2115 struct GEN8_3DSTATE_RASTER raster = {
2116 .GlobalDepthOffsetEnableSolid = enable_bias,
2117 .GlobalDepthOffsetEnableWireframe = enable_bias,
2118 .GlobalDepthOffsetEnablePoint = enable_bias,
2119 .GlobalDepthOffsetConstant = pCreateInfo->depthBias,
2120 .GlobalDepthOffsetScale = pCreateInfo->slopeScaledDepthBias,
2121 .GlobalDepthOffsetClamp = pCreateInfo->depthBiasClamp
2122 };
2123
2124 GEN8_3DSTATE_RASTER_pack(NULL, state->state_raster, &raster);
2125
2126 *pState = (VkDynamicRsState) state;
2127
2128 return VK_SUCCESS;
2129 }
2130
2131 VkResult anv_CreateDynamicColorBlendState(
2132 VkDevice _device,
2133 const VkDynamicCbStateCreateInfo* pCreateInfo,
2134 VkDynamicCbState* pState)
2135 {
2136 struct anv_device *device = (struct anv_device *) _device;
2137 struct anv_dynamic_cb_state *state;
2138
2139 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);
2140
2141 state = anv_device_alloc(device, sizeof(*state), 8,
2142 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2143 if (state == NULL)
2144 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2145
2146 struct GEN8_COLOR_CALC_STATE color_calc_state = {
2147 .BlendConstantColorRed = pCreateInfo->blendConst[0],
2148 .BlendConstantColorGreen = pCreateInfo->blendConst[1],
2149 .BlendConstantColorBlue = pCreateInfo->blendConst[2],
2150 .BlendConstantColorAlpha = pCreateInfo->blendConst[3]
2151 };
2152
2153 GEN8_COLOR_CALC_STATE_pack(NULL, state->state_color_calc, &color_calc_state);
2154
2155 *pState = (VkDynamicCbState) state;
2156
2157 return VK_SUCCESS;
2158 }
2159
2160 VkResult anv_CreateDynamicDepthStencilState(
2161 VkDevice _device,
2162 const VkDynamicDsStateCreateInfo* pCreateInfo,
2163 VkDynamicDsState* pState)
2164 {
2165 struct anv_device *device = (struct anv_device *) _device;
2166 struct anv_dynamic_ds_state *state;
2167
2168 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO);
2169
2170 state = anv_device_alloc(device, sizeof(*state), 8,
2171 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2172 if (state == NULL)
2173 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2174
2175 struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
2176 GEN8_3DSTATE_WM_DEPTH_STENCIL_header,
2177
2178 /* Is this what we need to do? */
2179 .StencilBufferWriteEnable = pCreateInfo->stencilWriteMask != 0,
2180
2181 .StencilTestMask = pCreateInfo->stencilReadMask,
2182 .StencilWriteMask = pCreateInfo->stencilWriteMask,
2183
2184 .BackfaceStencilTestMask = pCreateInfo->stencilReadMask,
2185 .BackfaceStencilWriteMask = pCreateInfo->stencilWriteMask,
2186 };
2187
2188 GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, state->state_wm_depth_stencil,
2189 &wm_depth_stencil);
2190
2191 struct GEN8_COLOR_CALC_STATE color_calc_state = {
2192 .StencilReferenceValue = pCreateInfo->stencilFrontRef,
2193 .BackFaceStencilReferenceValue = pCreateInfo->stencilBackRef
2194 };
2195
2196 GEN8_COLOR_CALC_STATE_pack(NULL, state->state_color_calc, &color_calc_state);
2197
2198 *pState = (VkDynamicDsState) state;
2199
2200 return VK_SUCCESS;
2201 }
2202
2203 // Command buffer functions
2204
2205 static void
2206 anv_cmd_buffer_destroy(struct anv_device *device,
2207 struct anv_object *object,
2208 VkObjectType obj_type)
2209 {
2210 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) object;
2211
2212 assert(obj_type == VK_OBJECT_TYPE_COMMAND_BUFFER);
2213
2214 /* Destroy all of the batch buffers */
2215 struct anv_batch_bo *bbo = cmd_buffer->last_batch_bo;
2216       while (bbo) {
2217 struct anv_batch_bo *prev = bbo->prev_batch_bo;
2218 anv_batch_bo_destroy(bbo, device);
2219 bbo = prev;
2220 }
2221 anv_reloc_list_finish(&cmd_buffer->batch.relocs, device);
2222
2223 /* Destroy all of the surface state buffers */
2224 bbo = cmd_buffer->surface_batch_bo;
2225       while (bbo) {
2226 struct anv_batch_bo *prev = bbo->prev_batch_bo;
2227 anv_batch_bo_destroy(bbo, device);
2228 bbo = prev;
2229 }
2230 anv_reloc_list_finish(&cmd_buffer->surface_relocs, device);
2231
2232 anv_state_stream_finish(&cmd_buffer->surface_state_stream);
2233 anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
2234 anv_state_stream_finish(&cmd_buffer->binding_table_state_stream);
2235 anv_device_free(device, cmd_buffer->exec2_objects);
2236 anv_device_free(device, cmd_buffer->exec2_bos);
2237 anv_device_free(device, cmd_buffer);
2238 }
2239
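/* Batch-chaining callback.  This is installed as cmd_buffer->batch.extend_cb
 * in anv_CreateCommandBuffer, so it runs when the current batch bo fills up:
 * it allocates a fresh anv_batch_bo, emits MI_BATCH_BUFFER_START in the old
 * one pointing at the new one, and makes the new bo the tail of the chain.
 */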
2240 static VkResult
2241 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
2242 {
2243 struct anv_cmd_buffer *cmd_buffer = _data;
2244
2245 struct anv_batch_bo *new_bbo, *old_bbo = cmd_buffer->last_batch_bo;
2246
2247 VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
2248 if (result != VK_SUCCESS)
2249 return result;
2250
2251 /* We set the end of the batch a little short so we would be sure we
2252 * have room for the chaining command. Since we're about to emit the
2253 * chaining command, let's set it back where it should go.
2254 */
2255 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
2256 assert(batch->end == old_bbo->bo.map + old_bbo->bo.size);
2257
2258 anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_START,
2259 GEN8_MI_BATCH_BUFFER_START_header,
2260 ._2ndLevelBatchBuffer = _1stlevelbatch,
2261 .AddressSpaceIndicator = ASI_PPGTT,
2262 .BatchBufferStartAddress = { &new_bbo->bo, 0 },
2263 );
2264
2265 /* Pad out to a 2-dword aligned boundary with zeros */
2266 if ((uintptr_t)batch->next % 8 != 0) {
2267 *(uint32_t *)batch->next = 0;
2268 batch->next += 4;
2269 }
2270
2271 anv_batch_bo_finish(cmd_buffer->last_batch_bo, batch);
2272
2273 new_bbo->prev_batch_bo = old_bbo;
2274 cmd_buffer->last_batch_bo = new_bbo;
2275
2276 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
2277
2278 return VK_SUCCESS;
2279 }
2280
2281 VkResult anv_CreateCommandBuffer(
2282 VkDevice _device,
2283 const VkCmdBufferCreateInfo* pCreateInfo,
2284 VkCmdBuffer* pCmdBuffer)
2285 {
2286 struct anv_device *device = (struct anv_device *) _device;
2287 struct anv_cmd_buffer *cmd_buffer;
2288 VkResult result;
2289
2290 cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
2291 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2292 if (cmd_buffer == NULL)
2293 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2294
2295 cmd_buffer->base.destructor = anv_cmd_buffer_destroy;
2296
2297 cmd_buffer->device = device;
2298 cmd_buffer->rs_state = NULL;
2299 cmd_buffer->vp_state = NULL;
2300 cmd_buffer->cb_state = NULL;
2301 memset(&cmd_buffer->descriptors, 0, sizeof(cmd_buffer->descriptors));
2302
2303 result = anv_batch_bo_create(device, &cmd_buffer->last_batch_bo);
2304 if (result != VK_SUCCESS)
2305 goto fail;
2306
2307 result = anv_reloc_list_init(&cmd_buffer->batch.relocs, device);
2308 if (result != VK_SUCCESS)
2309 goto fail_batch_bo;
2310
2311 cmd_buffer->batch.device = device;
2312 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
2313 cmd_buffer->batch.user_data = cmd_buffer;
2314
2315 anv_batch_bo_start(cmd_buffer->last_batch_bo, &cmd_buffer->batch,
2316 GEN8_MI_BATCH_BUFFER_START_length * 4);
2317
2318 result = anv_batch_bo_create(device, &cmd_buffer->surface_batch_bo);
2319 if (result != VK_SUCCESS)
2320 goto fail_batch_relocs;
2321 cmd_buffer->surface_batch_bo->first_reloc = 0;
2322
2323 result = anv_reloc_list_init(&cmd_buffer->surface_relocs, device);
2324 if (result != VK_SUCCESS)
2325 goto fail_ss_batch_bo;
2326
2327 /* Start surface_next at 1 so surface offset 0 is invalid. */
2328 cmd_buffer->surface_next = 1;
2329
2330 cmd_buffer->exec2_objects = NULL;
2331 cmd_buffer->exec2_bos = NULL;
2332 cmd_buffer->exec2_array_length = 0;
2333
2334 anv_state_stream_init(&cmd_buffer->binding_table_state_stream,
2335 &device->binding_table_block_pool);
2336 anv_state_stream_init(&cmd_buffer->surface_state_stream,
2337 &device->surface_state_block_pool);
2338 anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
2339 &device->dynamic_state_block_pool);
2340
2341 cmd_buffer->dirty = 0;
2342 cmd_buffer->vb_dirty = 0;
2343 cmd_buffer->descriptors_dirty = 0;
2344 cmd_buffer->pipeline = NULL;
2345 cmd_buffer->vp_state = NULL;
2346 cmd_buffer->rs_state = NULL;
2347 cmd_buffer->ds_state = NULL;
2348
2349 *pCmdBuffer = (VkCmdBuffer) cmd_buffer;
2350
2351 return VK_SUCCESS;
2352
2353 fail_ss_batch_bo:
2354 anv_batch_bo_destroy(cmd_buffer->surface_batch_bo, device);
2355 fail_batch_relocs:
2356 anv_reloc_list_finish(&cmd_buffer->batch.relocs, device);
2357 fail_batch_bo:
2358 anv_batch_bo_destroy(cmd_buffer->last_batch_bo, device);
2359 fail:
2360 anv_device_free(device, cmd_buffer);
2361
2362 return result;
2363 }
2364
2365 static void
2366 anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
2367 {
2368 struct anv_device *device = cmd_buffer->device;
2369
2370 anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_BASE_ADDRESS,
2371 .GeneralStateBaseAddress = { NULL, 0 },
2372 .GeneralStateMemoryObjectControlState = GEN8_MOCS,
2373 .GeneralStateBaseAddressModifyEnable = true,
2374 .GeneralStateBufferSize = 0xfffff,
2375 .GeneralStateBufferSizeModifyEnable = true,
2376
2377 .SurfaceStateBaseAddress = { &cmd_buffer->surface_batch_bo->bo, 0 },
2378 .SurfaceStateMemoryObjectControlState = GEN8_MOCS,
2379 .SurfaceStateBaseAddressModifyEnable = true,
2380
2381 .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
2382 .DynamicStateMemoryObjectControlState = GEN8_MOCS,
2383 .DynamicStateBaseAddressModifyEnable = true,
2384 .DynamicStateBufferSize = 0xfffff,
2385 .DynamicStateBufferSizeModifyEnable = true,
2386
2387 .IndirectObjectBaseAddress = { NULL, 0 },
2388 .IndirectObjectMemoryObjectControlState = GEN8_MOCS,
2389 .IndirectObjectBaseAddressModifyEnable = true,
2390 .IndirectObjectBufferSize = 0xfffff,
2391 .IndirectObjectBufferSizeModifyEnable = true,
2392
2393 .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
2394 .InstructionMemoryObjectControlState = GEN8_MOCS,
2395 .InstructionBaseAddressModifyEnable = true,
2396 .InstructionBufferSize = 0xfffff,
2397 .InstructionBuffersizeModifyEnable = true);
2398 }
2399
2400 VkResult anv_BeginCommandBuffer(
2401 VkCmdBuffer cmdBuffer,
2402 const VkCmdBufferBeginInfo* pBeginInfo)
2403 {
2404 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2405
2406 anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT,
2407 .PipelineSelection = _3D);
2408 anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_SIP);
2409
2410 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
2411
2412 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VF_STATISTICS,
2413 .StatisticsEnable = true);
2414 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HS, .Enable = false);
2415 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_TE, .TEEnable = false);
2416 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
2417 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);
2418
2419 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2420 .ConstantBufferOffset = 0,
2421 .ConstantBufferSize = 4);
2422 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2423 .ConstantBufferOffset = 4,
2424 .ConstantBufferSize = 4);
2425 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2426 .ConstantBufferOffset = 8,
2427 .ConstantBufferSize = 4);
2428
2429 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_CHROMAKEY,
2430 .ChromaKeyKillEnable = false);
2431 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SBE_SWIZ);
2432 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);
2433
2434 return VK_SUCCESS;
2435 }
2436
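/* Track a bo for the final execbuf2 call.  The exec2_objects/exec2_bos arrays
 * grow on demand (starting at 64 entries and doubling), and a bo that is
 * already present -- detected via bo->index -- is not added twice.  Relocation
 * entries, when given, are attached to the object so the kernel can patch it.
 */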
2437 static VkResult
2438 anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
2439 struct anv_bo *bo,
2440 struct drm_i915_gem_relocation_entry *relocs,
2441 size_t num_relocs)
2442 {
2443 struct drm_i915_gem_exec_object2 *obj;
2444
2445 if (bo->index < cmd_buffer->bo_count &&
2446 cmd_buffer->exec2_bos[bo->index] == bo)
2447 return VK_SUCCESS;
2448
2449 if (cmd_buffer->bo_count >= cmd_buffer->exec2_array_length) {
2450 uint32_t new_len = cmd_buffer->exec2_objects ?
2451 cmd_buffer->exec2_array_length * 2 : 64;
2452
2453 struct drm_i915_gem_exec_object2 *new_objects =
2454 anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_objects),
2455 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
2456 if (new_objects == NULL)
2457 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2458
2459 struct anv_bo **new_bos =
2460 anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_bos),
2461 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
2462          if (new_bos == NULL) {
2463 anv_device_free(cmd_buffer->device, new_objects);
2464 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2465 }
2466
2467 if (cmd_buffer->exec2_objects) {
2468 memcpy(new_objects, cmd_buffer->exec2_objects,
2469 cmd_buffer->bo_count * sizeof(*new_objects));
2470 memcpy(new_bos, cmd_buffer->exec2_bos,
2471 cmd_buffer->bo_count * sizeof(*new_bos));
2472 }
2473
2474 cmd_buffer->exec2_objects = new_objects;
2475 cmd_buffer->exec2_bos = new_bos;
2476 cmd_buffer->exec2_array_length = new_len;
2477 }
2478
2479 assert(cmd_buffer->bo_count < cmd_buffer->exec2_array_length);
2480
2481 bo->index = cmd_buffer->bo_count++;
2482 obj = &cmd_buffer->exec2_objects[bo->index];
2483 cmd_buffer->exec2_bos[bo->index] = bo;
2484
2485 obj->handle = bo->gem_handle;
2486 obj->relocation_count = 0;
2487 obj->relocs_ptr = 0;
2488 obj->alignment = 0;
2489 obj->offset = bo->offset;
2490 obj->flags = 0;
2491 obj->rsvd1 = 0;
2492 obj->rsvd2 = 0;
2493
2494 if (relocs) {
2495 obj->relocation_count = num_relocs;
2496 obj->relocs_ptr = (uintptr_t) relocs;
2497 }
2498
2499 return VK_SUCCESS;
2500 }
2501
2502 static void
2503 anv_cmd_buffer_add_validate_bos(struct anv_cmd_buffer *cmd_buffer,
2504 struct anv_reloc_list *list)
2505 {
2506 for (size_t i = 0; i < list->num_relocs; i++)
2507 anv_cmd_buffer_add_bo(cmd_buffer, list->reloc_bos[i], NULL, 0);
2508 }
2509
2510 static void
2511 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
2512 struct anv_reloc_list *list)
2513 {
2514 struct anv_bo *bo;
2515
2516 /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
2517     * struct drm_i915_gem_exec_object2 against the bo's current offset and if
2518     * all bos haven't moved it will skip relocation processing altogether.
2519 * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
2520 * value of offset so we can set it either way. For that to work we need
2521 * to make sure all relocs use the same presumed offset.
2522 */
2523
2524 for (size_t i = 0; i < list->num_relocs; i++) {
2525 bo = list->reloc_bos[i];
2526 if (bo->offset != list->relocs[i].presumed_offset)
2527 cmd_buffer->need_reloc = true;
2528
2529 list->relocs[i].target_handle = bo->index;
2530 }
2531 }
2532
2533 VkResult anv_EndCommandBuffer(
2534 VkCmdBuffer cmdBuffer)
2535 {
2536 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2537 struct anv_device *device = cmd_buffer->device;
2538 struct anv_batch *batch = &cmd_buffer->batch;
2539
2540 anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_END);
2541
2542 /* Round batch up to an even number of dwords. */
2543 if ((batch->next - batch->start) & 4)
2544 anv_batch_emit(batch, GEN8_MI_NOOP);
2545
2546 anv_batch_bo_finish(cmd_buffer->last_batch_bo, &cmd_buffer->batch);
2547 cmd_buffer->surface_batch_bo->num_relocs =
2548 cmd_buffer->surface_relocs.num_relocs - cmd_buffer->surface_batch_bo->first_reloc;
2549 cmd_buffer->surface_batch_bo->length = cmd_buffer->surface_next;
2550
2551 cmd_buffer->bo_count = 0;
2552 cmd_buffer->need_reloc = false;
2553
2554 /* Lock for access to bo->index. */
2555 pthread_mutex_lock(&device->mutex);
2556
2557 /* Add surface state bos first so we can add them with their relocs. */
2558 for (struct anv_batch_bo *bbo = cmd_buffer->surface_batch_bo;
2559 bbo != NULL; bbo = bbo->prev_batch_bo) {
2560 anv_cmd_buffer_add_bo(cmd_buffer, &bbo->bo,
2561 &cmd_buffer->surface_relocs.relocs[bbo->first_reloc],
2562 bbo->num_relocs);
2563 }
2564
2565 /* Add all of the BOs referenced by surface state */
2566 anv_cmd_buffer_add_validate_bos(cmd_buffer, &cmd_buffer->surface_relocs);
2567
2568 /* Add all but the first batch BO */
2569 struct anv_batch_bo *batch_bo = cmd_buffer->last_batch_bo;
2570 while (batch_bo->prev_batch_bo) {
2571 anv_cmd_buffer_add_bo(cmd_buffer, &batch_bo->bo,
2572 &batch->relocs.relocs[batch_bo->first_reloc],
2573 batch_bo->num_relocs);
2574 batch_bo = batch_bo->prev_batch_bo;
2575 }
2576
2577 /* Add everything referenced by the batches */
2578 anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->relocs);
2579
2580 /* Add the first batch bo last */
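   /* execbuffer2 treats the last object in the list as the batch buffer to
    * execute (batch_start_offset is relative to it), which is why this bo
    * has to be appended after everything else.
    */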
2581 assert(batch_bo->prev_batch_bo == NULL && batch_bo->first_reloc == 0);
2582 anv_cmd_buffer_add_bo(cmd_buffer, &batch_bo->bo,
2583 &batch->relocs.relocs[batch_bo->first_reloc],
2584 batch_bo->num_relocs);
2585 assert(batch_bo->bo.index == cmd_buffer->bo_count - 1);
2586
2587 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
2588 anv_cmd_buffer_process_relocs(cmd_buffer, &batch->relocs);
2589
2590 cmd_buffer->execbuf.buffers_ptr = (uintptr_t) cmd_buffer->exec2_objects;
2591 cmd_buffer->execbuf.buffer_count = cmd_buffer->bo_count;
2592 cmd_buffer->execbuf.batch_start_offset = 0;
2593 cmd_buffer->execbuf.batch_len = batch->next - batch->start;
2594 cmd_buffer->execbuf.cliprects_ptr = 0;
2595 cmd_buffer->execbuf.num_cliprects = 0;
2596 cmd_buffer->execbuf.DR1 = 0;
2597 cmd_buffer->execbuf.DR4 = 0;
2598
2599 cmd_buffer->execbuf.flags = I915_EXEC_HANDLE_LUT;
2600 if (!cmd_buffer->need_reloc)
2601 cmd_buffer->execbuf.flags |= I915_EXEC_NO_RELOC;
2602 cmd_buffer->execbuf.flags |= I915_EXEC_RENDER;
2603 cmd_buffer->execbuf.rsvd1 = device->context_id;
2604 cmd_buffer->execbuf.rsvd2 = 0;
2605
2606 pthread_mutex_unlock(&device->mutex);
2607
2608 return VK_SUCCESS;
2609 }
2610
2611 VkResult anv_ResetCommandBuffer(
2612 VkCmdBuffer cmdBuffer)
2613 {
2614 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2615
2616 /* Delete all but the first batch bo */
2617 while (cmd_buffer->last_batch_bo->prev_batch_bo) {
2618 struct anv_batch_bo *prev = cmd_buffer->last_batch_bo->prev_batch_bo;
2619 anv_batch_bo_destroy(cmd_buffer->last_batch_bo, cmd_buffer->device);
2620 cmd_buffer->last_batch_bo = prev;
2621 }
2622 assert(cmd_buffer->last_batch_bo->prev_batch_bo == NULL);
2623
2624 cmd_buffer->batch.relocs.num_relocs = 0;
2625 anv_batch_bo_start(cmd_buffer->last_batch_bo, &cmd_buffer->batch,
2626 GEN8_MI_BATCH_BUFFER_START_length * 4);
2627
2628    /* Delete all but the first surface state bo */
2629 while (cmd_buffer->surface_batch_bo->prev_batch_bo) {
2630 struct anv_batch_bo *prev = cmd_buffer->surface_batch_bo->prev_batch_bo;
2631 anv_batch_bo_destroy(cmd_buffer->surface_batch_bo, cmd_buffer->device);
2632 cmd_buffer->surface_batch_bo = prev;
2633 }
2634 assert(cmd_buffer->surface_batch_bo->prev_batch_bo == NULL);
2635
2636 cmd_buffer->surface_next = 1;
2637 cmd_buffer->surface_relocs.num_relocs = 0;
2638
2639 return VK_SUCCESS;
2640 }
2641
2642 // Command buffer building functions
2643
2644 void anv_CmdBindPipeline(
2645 VkCmdBuffer cmdBuffer,
2646 VkPipelineBindPoint pipelineBindPoint,
2647 VkPipeline _pipeline)
2648 {
2649 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2650 struct anv_pipeline *pipeline = (struct anv_pipeline *) _pipeline;
2651
2652 cmd_buffer->pipeline = pipeline;
2653 cmd_buffer->vb_dirty |= pipeline->vb_used;
2654 cmd_buffer->dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
2655 }
2656
2657 void anv_CmdBindDynamicStateObject(
2658 VkCmdBuffer cmdBuffer,
2659 VkStateBindPoint stateBindPoint,
2660 VkDynamicStateObject dynamicState)
2661 {
2662 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2663 struct anv_dynamic_vp_state *vp_state;
2664
2665 switch (stateBindPoint) {
2666 case VK_STATE_BIND_POINT_VIEWPORT:
2667 vp_state = (struct anv_dynamic_vp_state *) dynamicState;
2668 /* We emit state immediately, but set cmd_buffer->vp_state to indicate
2669 * that vp state has been set in this command buffer. */
2670 cmd_buffer->vp_state = vp_state;
2671 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
2672 .ScissorRectPointer = vp_state->scissor.offset);
2673 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
2674 .CCViewportPointer = vp_state->cc_vp.offset);
2675 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
2676 .SFClipViewportPointer = vp_state->sf_clip_vp.offset);
2677 break;
2678 case VK_STATE_BIND_POINT_RASTER:
2679 cmd_buffer->rs_state = (struct anv_dynamic_rs_state *) dynamicState;
2680 cmd_buffer->dirty |= ANV_CMD_BUFFER_RS_DIRTY;
2681 break;
2682 case VK_STATE_BIND_POINT_COLOR_BLEND:
2683 cmd_buffer->cb_state = (struct anv_dynamic_cb_state *) dynamicState;
2684 cmd_buffer->dirty |= ANV_CMD_BUFFER_CB_DIRTY;
2685 break;
2686 case VK_STATE_BIND_POINT_DEPTH_STENCIL:
2687 cmd_buffer->ds_state = (struct anv_dynamic_ds_state *) dynamicState;
2688 cmd_buffer->dirty |= ANV_CMD_BUFFER_DS_DIRTY;
2689 break;
2690 default:
2691 break;
2692    }
2693 }
2694
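/* Sub-allocate surface state from the command buffer's current surface bo.
 * The offset is simply bumped along surface_next; when the bo is full the
 * function returns a zeroed anv_state (map == NULL), which callers use as the
 * cue to spill to a new surface bo via anv_cmd_buffer_new_surface_state_bo().
 */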
2695 static struct anv_state
2696 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer,
2697 uint32_t size, uint32_t alignment)
2698 {
2699 struct anv_state state;
2700
2701 state.offset = ALIGN_U32(cmd_buffer->surface_next, alignment);
2702 if (state.offset + size > cmd_buffer->surface_batch_bo->bo.size)
2703 return (struct anv_state) { 0 };
2704
2705 state.map = cmd_buffer->surface_batch_bo->bo.map + state.offset;
2706 state.alloc_size = size;
2707 cmd_buffer->surface_next = state.offset + size;
2708
2709 assert(state.offset + size <= cmd_buffer->surface_batch_bo->bo.size);
2710
2711 return state;
2712 }
2713
2714 static VkResult
2715 anv_cmd_buffer_new_surface_state_bo(struct anv_cmd_buffer *cmd_buffer)
2716 {
2717 struct anv_batch_bo *new_bbo, *old_bbo = cmd_buffer->surface_batch_bo;
2718
2719 /* Finish off the old buffer */
2720 old_bbo->num_relocs =
2721 cmd_buffer->surface_relocs.num_relocs - old_bbo->first_reloc;
2722 old_bbo->length = cmd_buffer->surface_next;
2723
2724 VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
2725 if (result != VK_SUCCESS)
2726 return result;
2727
2728 new_bbo->first_reloc = cmd_buffer->surface_relocs.num_relocs;
2729 cmd_buffer->surface_next = 1;
2730
2731 new_bbo->prev_batch_bo = old_bbo;
2732 cmd_buffer->surface_batch_bo = new_bbo;
2733
2734 /* Re-emit state base addresses so we get the new surface state base
2735 * address before we start emitting binding tables etc.
2736 */
2737 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
2738
2739 /* It seems like just changing the state base addresses isn't enough.
2740 * Invalidating the cache seems to be enough to cause things to
2741 * propagate. However, I'm not 100% sure what we're supposed to do.
2742 */
2743 anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
2744 .TextureCacheInvalidationEnable = true);
2745
2746 return VK_SUCCESS;
2747 }
2748
2749 void anv_CmdBindDescriptorSets(
2750 VkCmdBuffer cmdBuffer,
2751 VkPipelineBindPoint pipelineBindPoint,
2752 uint32_t firstSet,
2753 uint32_t setCount,
2754 const VkDescriptorSet* pDescriptorSets,
2755 uint32_t dynamicOffsetCount,
2756 const uint32_t* pDynamicOffsets)
2757 {
2758 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2759 struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
2760 struct anv_descriptor_set *set;
2761 struct anv_descriptor_set_layout *set_layout;
2762
2763    assert(firstSet + setCount <= MAX_SETS);
2764
2765 uint32_t dynamic_slot = 0;
2766 for (uint32_t i = 0; i < setCount; i++) {
2767 set = (struct anv_descriptor_set *) pDescriptorSets[i];
2768 set_layout = layout->set[firstSet + i].layout;
2769
2770 cmd_buffer->descriptors[firstSet + i].set = set;
2771
2772 assert(set_layout->num_dynamic_buffers <
2773 ARRAY_SIZE(cmd_buffer->descriptors[0].dynamic_offsets));
2774 memcpy(cmd_buffer->descriptors[firstSet + i].dynamic_offsets,
2775 pDynamicOffsets + dynamic_slot,
2776 set_layout->num_dynamic_buffers * sizeof(*pDynamicOffsets));
2777
2778 cmd_buffer->descriptors_dirty |= set_layout->shader_stages;
2779
2780 dynamic_slot += set_layout->num_dynamic_buffers;
2781 }
2782 }
2783
2784 void anv_CmdBindIndexBuffer(
2785 VkCmdBuffer cmdBuffer,
2786 VkBuffer _buffer,
2787 VkDeviceSize offset,
2788 VkIndexType indexType)
2789 {
2790 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2791 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
2792
2793 static const uint32_t vk_to_gen_index_type[] = {
2794 [VK_INDEX_TYPE_UINT8] = INDEX_BYTE,
2795 [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
2796 [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
2797 };
2798
2799 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_INDEX_BUFFER,
2800 .IndexFormat = vk_to_gen_index_type[indexType],
2801 .MemoryObjectControlState = GEN8_MOCS,
2802 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
2803 .BufferSize = buffer->size - offset);
2804 }
2805
2806 void anv_CmdBindVertexBuffers(
2807 VkCmdBuffer cmdBuffer,
2808 uint32_t startBinding,
2809 uint32_t bindingCount,
2810 const VkBuffer* pBuffers,
2811 const VkDeviceSize* pOffsets)
2812 {
2813 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2814 struct anv_vertex_binding *vb = cmd_buffer->vertex_bindings;
2815
2816    /* We have to defer setting up the vertex buffers since we need the buffer
2817 * stride from the pipeline. */
2818
2819    assert(startBinding + bindingCount <= MAX_VBS);
2820 for (uint32_t i = 0; i < bindingCount; i++) {
2821 vb[startBinding + i].buffer = (struct anv_buffer *) pBuffers[i];
2822 vb[startBinding + i].offset = pOffsets[i];
2823 cmd_buffer->vb_dirty |= 1 << (startBinding + i);
2824 }
2825 }
2826
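/* Emit the binding table for one shader stage.  The fragment stage reserves
 * the first MAX_RTS entries (the "bias") for the framebuffer's color
 * attachments; descriptor-set surfaces then land at bias plus the set's
 * per-stage surface_start.  Each entry is the offset of a SURFACE_STATE
 * copied into the surface state bo, with its address dwords (8 and 9)
 * rewritten through the surface relocation list.
 */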
2827 static VkResult
2828 cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
2829 unsigned stage)
2830 {
2831 struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
2832 uint32_t color_attachments, bias, size;
2833 struct anv_state bt_state;
2834
2835 if (stage == VK_SHADER_STAGE_FRAGMENT) {
2836 bias = MAX_RTS;
2837 color_attachments = cmd_buffer->framebuffer->color_attachment_count;
2838 } else {
2839 bias = 0;
2840 color_attachments = 0;
2841 }
2842
2843 /* This is a little awkward: layout can be NULL but we still have to
2844 * allocate and set a binding table for the PS stage for render
2845 * targets. */
2846 uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;
2847
2848 if (color_attachments + surface_count == 0)
2849 return VK_SUCCESS;
2850
2851 size = (bias + surface_count) * sizeof(uint32_t);
2852 bt_state = anv_cmd_buffer_alloc_surface_state(cmd_buffer, size, 32);
2853 uint32_t *bt_map = bt_state.map;
2854
2855 if (bt_state.map == NULL)
2856 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2857
2858 static const uint32_t binding_table_opcodes[] = {
2859 [VK_SHADER_STAGE_VERTEX] = 38,
2860 [VK_SHADER_STAGE_TESS_CONTROL] = 39,
2861 [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
2862 [VK_SHADER_STAGE_GEOMETRY] = 41,
2863 [VK_SHADER_STAGE_FRAGMENT] = 42,
2864 [VK_SHADER_STAGE_COMPUTE] = 0,
2865 };
2866
2867 anv_batch_emit(&cmd_buffer->batch,
2868 GEN8_3DSTATE_BINDING_TABLE_POINTERS_VS,
2869 ._3DCommandSubOpcode = binding_table_opcodes[stage],
2870 .PointertoVSBindingTable = bt_state.offset);
2871
2872 for (uint32_t ca = 0; ca < color_attachments; ca++) {
2873 const struct anv_surface_view *view =
2874 cmd_buffer->framebuffer->color_attachments[ca];
2875
2876 struct anv_state state =
2877 anv_cmd_buffer_alloc_surface_state(cmd_buffer, 64, 64);
2878
2879 if (state.map == NULL)
2880 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2881
2882 memcpy(state.map, view->surface_state.map, 64);
2883
2884 /* The address goes in dwords 8 and 9 of the SURFACE_STATE */
2885 *(uint64_t *)(state.map + 8 * 4) =
2886 anv_reloc_list_add(&cmd_buffer->surface_relocs,
2887 cmd_buffer->device,
2888 state.offset + 8 * 4,
2889 view->bo, view->offset);
2890
2891 bt_map[ca] = state.offset;
2892 }
2893
2894 if (layout == NULL)
2895 return VK_SUCCESS;
2896
2897 for (uint32_t set = 0; set < layout->num_sets; set++) {
2898 struct anv_descriptor_set_binding *d = &cmd_buffer->descriptors[set];
2899 struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
2900 struct anv_descriptor_slot *surface_slots =
2901 set_layout->stage[stage].surface_start;
2902
2903 uint32_t start = bias + layout->set[set].surface_start[stage];
2904
2905 for (uint32_t b = 0; b < set_layout->stage[stage].surface_count; b++) {
2906 struct anv_surface_view *view =
2907 d->set->descriptors[surface_slots[b].index].view;
2908
2909 if (!view)
2910 continue;
2911
2912 struct anv_state state =
2913 anv_cmd_buffer_alloc_surface_state(cmd_buffer, 64, 64);
2914
2915 if (state.map == NULL)
2916 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2917
2918 uint32_t offset;
2919 if (surface_slots[b].dynamic_slot >= 0) {
2920 uint32_t dynamic_offset =
2921 d->dynamic_offsets[surface_slots[b].dynamic_slot];
2922
2923 offset = view->offset + dynamic_offset;
2924 fill_buffer_surface_state(state.map, view->format, offset,
2925 view->range - dynamic_offset);
2926 } else {
2927 offset = view->offset;
2928 memcpy(state.map, view->surface_state.map, 64);
2929 }
2930
2931 /* The address goes in dwords 8 and 9 of the SURFACE_STATE */
2932 *(uint64_t *)(state.map + 8 * 4) =
2933 anv_reloc_list_add(&cmd_buffer->surface_relocs,
2934 cmd_buffer->device,
2935 state.offset + 8 * 4,
2936 view->bo, offset);
2937
2938 bt_map[start + b] = state.offset;
2939 }
2940 }
2941
2942 return VK_SUCCESS;
2943 }
2944
2945 static VkResult
2946 cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer, unsigned stage)
2947 {
2948 struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
2949 struct anv_state state;
2950
2951 if (!layout)
2952 return VK_SUCCESS;
2953
2954 uint32_t sampler_count = layout->stage[stage].sampler_count;
2955
2956 if (sampler_count == 0)
2957 return VK_SUCCESS;
2958
2959 uint32_t size = sampler_count * 16;
2960 state = anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream, size, 32);
2961
2962 if (state.map == NULL)
2963 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2964
2965 static const uint32_t sampler_state_opcodes[] = {
2966 [VK_SHADER_STAGE_VERTEX] = 43,
2967 [VK_SHADER_STAGE_TESS_CONTROL] = 44, /* HS */
2968 [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
2969 [VK_SHADER_STAGE_GEOMETRY] = 46,
2970 [VK_SHADER_STAGE_FRAGMENT] = 47,
2971 [VK_SHADER_STAGE_COMPUTE] = 0,
2972 };
2973
2974 anv_batch_emit(&cmd_buffer->batch,
2975 GEN8_3DSTATE_SAMPLER_STATE_POINTERS_VS,
2976 ._3DCommandSubOpcode = sampler_state_opcodes[stage],
2977 .PointertoVSSamplerState = state.offset);
2978
2979 for (uint32_t set = 0; set < layout->num_sets; set++) {
2980 struct anv_descriptor_set_binding *d = &cmd_buffer->descriptors[set];
2981 struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
2982 struct anv_descriptor_slot *sampler_slots =
2983 set_layout->stage[stage].sampler_start;
2984
2985 uint32_t start = layout->set[set].sampler_start[stage];
2986
2987 for (uint32_t b = 0; b < set_layout->stage[stage].sampler_count; b++) {
2988 struct anv_sampler *sampler =
2989 d->set->descriptors[sampler_slots[b].index].sampler;
2990
2991 if (!sampler)
2992 continue;
2993
2994 memcpy(state.map + (start + b) * 16,
2995 sampler->state, sizeof(sampler->state));
2996 }
2997 }
2998
2999 return VK_SUCCESS;
3000 }
3001
3002 static void
3003 flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
3004 {
3005 uint32_t s, dirty = cmd_buffer->descriptors_dirty &
3006 cmd_buffer->pipeline->active_stages;
3007
3008    VkResult result = VK_SUCCESS;
3009 for_each_bit(s, dirty) {
3010 result = cmd_buffer_emit_binding_table(cmd_buffer, s);
3011 if (result != VK_SUCCESS)
3012 break;
3013
3014 result = cmd_buffer_emit_samplers(cmd_buffer, s);
3015 if (result != VK_SUCCESS)
3016 break;
3017 }
3018
3019 if (result != VK_SUCCESS) {
3020 assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
3021
3022 result = anv_cmd_buffer_new_surface_state_bo(cmd_buffer);
3023 assert(result == VK_SUCCESS);
3024
3025 /* Re-emit all active binding tables */
3026 for_each_bit(s, cmd_buffer->pipeline->active_stages) {
3027 result = cmd_buffer_emit_binding_table(cmd_buffer, s);
3028 result = cmd_buffer_emit_samplers(cmd_buffer, s);
3029 }
3030
3031 /* It had better succeed this time */
3032 assert(result == VK_SUCCESS);
3033 }
3034
3035 cmd_buffer->descriptors_dirty &= ~cmd_buffer->pipeline->active_stages;
3036 }
3037
3038 static struct anv_state
3039 anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
3040 uint32_t *a, uint32_t dwords, uint32_t alignment)
3041 {
3042 struct anv_device *device = cmd_buffer->device;
3043 struct anv_state state;
3044
3045 state = anv_state_pool_alloc(&device->dynamic_state_pool, dwords * 4, alignment);
3046 memcpy(state.map, a, dwords * 4);
3047
3048 return state;
3049 }
3050
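/* Like anv_cmd_buffer_emit_dynamic(), but ORs two pre-packed dword arrays
 * together.  This works because each source only packs the fields it owns:
 * the caller in anv_cmd_buffer_flush_state combines the depth-stencil
 * COLOR_CALC_STATE (stencil references) with the color-blend one (blend
 * constants), so the bitwise OR yields the full state.
 */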
3051 static struct anv_state
3052 anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
3053 uint32_t *a, uint32_t *b, uint32_t dwords, uint32_t alignment)
3054 {
3055 struct anv_device *device = cmd_buffer->device;
3056 struct anv_state state;
3057 uint32_t *p;
3058
3059 state = anv_state_pool_alloc(&device->dynamic_state_pool, dwords * 4, alignment);
3060 p = state.map;
3061 for (uint32_t i = 0; i < dwords; i++)
3062 p[i] = a[i] | b[i];
3063
3064 return state;
3065 }
3066
3067 static void
3068 anv_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
3069 {
3070 struct anv_pipeline *pipeline = cmd_buffer->pipeline;
3071 uint32_t *p;
3072
3073 uint32_t vb_emit = cmd_buffer->vb_dirty & pipeline->vb_used;
3074
3075 if (vb_emit) {
3076 const uint32_t num_buffers = __builtin_popcount(vb_emit);
3077 const uint32_t num_dwords = 1 + num_buffers * 4;
3078
3079 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
3080 GEN8_3DSTATE_VERTEX_BUFFERS);
3081 uint32_t vb, i = 0;
3082 for_each_bit(vb, vb_emit) {
3083 struct anv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
3084 uint32_t offset = cmd_buffer->vertex_bindings[vb].offset;
3085
3086 struct GEN8_VERTEX_BUFFER_STATE state = {
3087 .VertexBufferIndex = vb,
3088 .MemoryObjectControlState = GEN8_MOCS,
3089 .AddressModifyEnable = true,
3090 .BufferPitch = pipeline->binding_stride[vb],
3091 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
3092 .BufferSize = buffer->size - offset
3093 };
3094
3095 GEN8_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
3096 i++;
3097 }
3098 }
3099
3100 if (cmd_buffer->dirty & ANV_CMD_BUFFER_PIPELINE_DIRTY)
3101 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
3102
3103 if (cmd_buffer->descriptors_dirty)
3104 flush_descriptor_sets(cmd_buffer);
3105
3106 if (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_RS_DIRTY)) {
3107 anv_batch_emit_merge(&cmd_buffer->batch,
3108 cmd_buffer->rs_state->state_sf, pipeline->state_sf);
3109 anv_batch_emit_merge(&cmd_buffer->batch,
3110 cmd_buffer->rs_state->state_raster, pipeline->state_raster);
3111 }
3112
3113 if (cmd_buffer->ds_state &&
3114 (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_DS_DIRTY)))
3115 anv_batch_emit_merge(&cmd_buffer->batch,
3116 cmd_buffer->ds_state->state_wm_depth_stencil,
3117 pipeline->state_wm_depth_stencil);
3118
3119 if (cmd_buffer->dirty & (ANV_CMD_BUFFER_CB_DIRTY | ANV_CMD_BUFFER_DS_DIRTY)) {
3120 struct anv_state state;
3121 if (cmd_buffer->ds_state == NULL)
3122 state = anv_cmd_buffer_emit_dynamic(cmd_buffer,
3123 cmd_buffer->cb_state->state_color_calc,
3124 GEN8_COLOR_CALC_STATE_length, 32);
3125 else if (cmd_buffer->cb_state == NULL)
3126 state = anv_cmd_buffer_emit_dynamic(cmd_buffer,
3127 cmd_buffer->ds_state->state_color_calc,
3128 GEN8_COLOR_CALC_STATE_length, 32);
3129 else
3130 state = anv_cmd_buffer_merge_dynamic(cmd_buffer,
3131 cmd_buffer->ds_state->state_color_calc,
3132 cmd_buffer->cb_state->state_color_calc,
3133 GEN8_COLOR_CALC_STATE_length, 32);
3134
3135 anv_batch_emit(&cmd_buffer->batch,
3136 GEN8_3DSTATE_CC_STATE_POINTERS,
3137 .ColorCalcStatePointer = state.offset,
3138 .ColorCalcStatePointerValid = true);
3139 }
3140
3141 cmd_buffer->vb_dirty &= ~vb_emit;
3142 cmd_buffer->dirty = 0;
3143 }
3144
3145 void anv_CmdDraw(
3146 VkCmdBuffer cmdBuffer,
3147 uint32_t firstVertex,
3148 uint32_t vertexCount,
3149 uint32_t firstInstance,
3150 uint32_t instanceCount)
3151 {
3152 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3153
3154 anv_cmd_buffer_flush_state(cmd_buffer);
3155
3156 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
3157 .VertexAccessType = SEQUENTIAL,
3158 .VertexCountPerInstance = vertexCount,
3159 .StartVertexLocation = firstVertex,
3160 .InstanceCount = instanceCount,
3161 .StartInstanceLocation = firstInstance,
3162 .BaseVertexLocation = 0);
3163 }
3164
3165 void anv_CmdDrawIndexed(
3166 VkCmdBuffer cmdBuffer,
3167 uint32_t firstIndex,
3168 uint32_t indexCount,
3169 int32_t vertexOffset,
3170 uint32_t firstInstance,
3171 uint32_t instanceCount)
3172 {
3173 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3174
3175 anv_cmd_buffer_flush_state(cmd_buffer);
3176
3177 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
3178 .VertexAccessType = RANDOM,
3179 .VertexCountPerInstance = indexCount,
3180 .StartVertexLocation = firstIndex,
3181 .InstanceCount = instanceCount,
3182 .StartInstanceLocation = firstInstance,
3183 .BaseVertexLocation = vertexOffset);
3184 }
3185
3186 static void
3187 anv_batch_lrm(struct anv_batch *batch,
3188 uint32_t reg, struct anv_bo *bo, uint32_t offset)
3189 {
3190 anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
3191 .RegisterAddress = reg,
3192 .MemoryAddress = { bo, offset });
3193 }
3194
3195 static void
3196 anv_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
3197 {
3198 anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_IMM,
3199 .RegisterOffset = reg,
3200 .DataDWord = imm);
3201 }
3202
3203 /* Auto-Draw / Indirect Registers */
3204 #define GEN7_3DPRIM_END_OFFSET 0x2420
3205 #define GEN7_3DPRIM_START_VERTEX 0x2430
3206 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
3207 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
3208 #define GEN7_3DPRIM_START_INSTANCE 0x243C
3209 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
3210
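/* Indirect draws load the indirect arguments (vertex/index count, instance
 * count, start vertex, start instance, base vertex) straight from the buffer
 * into the 3DPRIM_* registers above with MI_LOAD_REGISTER_MEM, then fire a
 * single 3DPRIMITIVE with IndirectParameterEnable set.  Note that only one
 * draw is emitted: the count and stride arguments are currently ignored.
 */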
3211 void anv_CmdDrawIndirect(
3212 VkCmdBuffer cmdBuffer,
3213 VkBuffer _buffer,
3214 VkDeviceSize offset,
3215 uint32_t count,
3216 uint32_t stride)
3217 {
3218 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3219 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
3220 struct anv_bo *bo = buffer->bo;
3221 uint32_t bo_offset = buffer->offset + offset;
3222
3223 anv_cmd_buffer_flush_state(cmd_buffer);
3224
3225 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
3226 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
3227 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
3228 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
3229 anv_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
3230
3231 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
3232 .IndirectParameterEnable = true,
3233 .VertexAccessType = SEQUENTIAL);
3234 }
3235
3236 void anv_CmdDrawIndexedIndirect(
3237 VkCmdBuffer cmdBuffer,
3238 VkBuffer _buffer,
3239 VkDeviceSize offset,
3240 uint32_t count,
3241 uint32_t stride)
3242 {
3243 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3244 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
3245 struct anv_bo *bo = buffer->bo;
3246 uint32_t bo_offset = buffer->offset + offset;
3247
3248 anv_cmd_buffer_flush_state(cmd_buffer);
3249
3250 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
3251 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
3252 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
3253 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
3254 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
3255
3256 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
3257 .IndirectParameterEnable = true,
3258 .VertexAccessType = RANDOM);
3259 }
3260
3261 void anv_CmdDispatch(
3262 VkCmdBuffer cmdBuffer,
3263 uint32_t x,
3264 uint32_t y,
3265 uint32_t z)
3266 {
3267 stub();
3268 }
3269
3270 void anv_CmdDispatchIndirect(
3271 VkCmdBuffer cmdBuffer,
3272 VkBuffer buffer,
3273 VkDeviceSize offset)
3274 {
3275 stub();
3276 }
3277
3278 void anv_CmdSetEvent(
3279 VkCmdBuffer cmdBuffer,
3280 VkEvent event,
3281 VkPipeEvent pipeEvent)
3282 {
3283 stub();
3284 }
3285
3286 void anv_CmdResetEvent(
3287 VkCmdBuffer cmdBuffer,
3288 VkEvent event,
3289 VkPipeEvent pipeEvent)
3290 {
3291 stub();
3292 }
3293
3294 void anv_CmdWaitEvents(
3295 VkCmdBuffer cmdBuffer,
3296 VkWaitEvent waitEvent,
3297 uint32_t eventCount,
3298 const VkEvent* pEvents,
3299 uint32_t memBarrierCount,
3300 const void** ppMemBarriers)
3301 {
3302 stub();
3303 }
3304
3305 void anv_CmdPipelineBarrier(
3306 VkCmdBuffer cmdBuffer,
3307 VkWaitEvent waitEvent,
3308 uint32_t pipeEventCount,
3309 const VkPipeEvent* pPipeEvents,
3310 uint32_t memBarrierCount,
3311 const void** ppMemBarriers)
3312 {
3313 stub();
3314 }
3315
3316 void anv_CmdInitAtomicCounters(
3317 VkCmdBuffer cmdBuffer,
3318 VkPipelineBindPoint pipelineBindPoint,
3319 uint32_t startCounter,
3320 uint32_t counterCount,
3321 const uint32_t* pData)
3322 {
3323 stub();
3324 }
3325
3326 void anv_CmdLoadAtomicCounters(
3327 VkCmdBuffer cmdBuffer,
3328 VkPipelineBindPoint pipelineBindPoint,
3329 uint32_t startCounter,
3330 uint32_t counterCount,
3331 VkBuffer srcBuffer,
3332 VkDeviceSize srcOffset)
3333 {
3334 stub();
3335 }
3336
3337 void anv_CmdSaveAtomicCounters(
3338 VkCmdBuffer cmdBuffer,
3339 VkPipelineBindPoint pipelineBindPoint,
3340 uint32_t startCounter,
3341 uint32_t counterCount,
3342 VkBuffer destBuffer,
3343 VkDeviceSize destOffset)
3344 {
3345 stub();
3346 }
3347
3348 static void
3349 anv_framebuffer_destroy(struct anv_device *device,
3350 struct anv_object *object,
3351 VkObjectType obj_type)
3352 {
3353 struct anv_framebuffer *fb = (struct anv_framebuffer *)object;
3354
3355 assert(obj_type == VK_OBJECT_TYPE_FRAMEBUFFER);
3356
3357 anv_DestroyObject((VkDevice) device,
3358 VK_OBJECT_TYPE_DYNAMIC_VP_STATE,
3359 fb->vp_state);
3360
3361 anv_device_free(device, fb);
3362 }
3363
3364 VkResult anv_CreateFramebuffer(
3365 VkDevice _device,
3366 const VkFramebufferCreateInfo* pCreateInfo,
3367 VkFramebuffer* pFramebuffer)
3368 {
3369 struct anv_device *device = (struct anv_device *) _device;
3370 struct anv_framebuffer *framebuffer;
3371
3372 static const struct anv_depth_stencil_view null_view =
3373 { .depth_format = D16_UNORM, .depth_stride = 0, .stencil_stride = 0 };
3374
3375 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
3376
3377 framebuffer = anv_device_alloc(device, sizeof(*framebuffer), 8,
3378 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
3379 if (framebuffer == NULL)
3380 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3381
3382 framebuffer->base.destructor = anv_framebuffer_destroy;
3383
3384 framebuffer->color_attachment_count = pCreateInfo->colorAttachmentCount;
3385 for (uint32_t i = 0; i < pCreateInfo->colorAttachmentCount; i++) {
3386 framebuffer->color_attachments[i] =
3387 (struct anv_surface_view *) pCreateInfo->pColorAttachments[i].view;
3388 }
3389
3390 if (pCreateInfo->pDepthStencilAttachment) {
3391 framebuffer->depth_stencil =
3392 (struct anv_depth_stencil_view *) pCreateInfo->pDepthStencilAttachment->view;
3393 } else {
3394 framebuffer->depth_stencil = &null_view;
3395 }
3396
3397 framebuffer->sample_count = pCreateInfo->sampleCount;
3398 framebuffer->width = pCreateInfo->width;
3399 framebuffer->height = pCreateInfo->height;
3400 framebuffer->layers = pCreateInfo->layers;
3401
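   /* Every framebuffer gets an implicit viewport/scissor state covering its
    * full width and height with a [0,1] depth range.  It is stored in the
    * framebuffer's vp_state and released again in anv_framebuffer_destroy().
    */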
3402 vkCreateDynamicViewportState((VkDevice) device,
3403 &(VkDynamicVpStateCreateInfo) {
3404 .sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO,
3405 .viewportAndScissorCount = 1,
3406 .pViewports = (VkViewport[]) {
3407 {
3408 .originX = 0,
3409 .originY = 0,
3410 .width = pCreateInfo->width,
3411 .height = pCreateInfo->height,
3412 .minDepth = 0,
3413 .maxDepth = 1
3414 },
3415 },
3416 .pScissors = (VkRect[]) {
3417 { { 0, 0 },
3418 { pCreateInfo->width, pCreateInfo->height } },
3419 }
3420 },
3421 &framebuffer->vp_state);
3422
3423 *pFramebuffer = (VkFramebuffer) framebuffer;
3424
3425 return VK_SUCCESS;
3426 }
3427
3428 VkResult anv_CreateRenderPass(
3429 VkDevice _device,
3430 const VkRenderPassCreateInfo* pCreateInfo,
3431 VkRenderPass* pRenderPass)
3432 {
3433 struct anv_device *device = (struct anv_device *) _device;
3434 struct anv_render_pass *pass;
3435 size_t size;
3436
3437 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
3438
3439 size = sizeof(*pass) +
3440 pCreateInfo->layers * sizeof(struct anv_render_pass_layer);
3441 pass = anv_device_alloc(device, size, 8,
3442 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
3443 if (pass == NULL)
3444 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3445
3446 pass->render_area = pCreateInfo->renderArea;
3447
3448 pass->num_layers = pCreateInfo->layers;
3449
3450 pass->num_clear_layers = 0;
3451 for (uint32_t i = 0; i < pCreateInfo->layers; i++) {
3452 pass->layers[i].color_load_op = pCreateInfo->pColorLoadOps[i];
3453 pass->layers[i].clear_color = pCreateInfo->pColorLoadClearValues[i];
3454 if (pass->layers[i].color_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
3455 pass->num_clear_layers++;
3456 }
3457
3458 *pRenderPass = (VkRenderPass) pass;
3459
3460 return VK_SUCCESS;
3461 }
3462
3463 static void
3464 anv_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
3465 struct anv_render_pass *pass)
3466 {
3467 const struct anv_depth_stencil_view *view =
3468 cmd_buffer->framebuffer->depth_stencil;
3469
3470 /* FIXME: Implement the PMA stall W/A */
3471
3472 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER,
3473 .SurfaceType = SURFTYPE_2D,
3474 .DepthWriteEnable = view->depth_stride > 0,
3475 .StencilWriteEnable = view->stencil_stride > 0,
3476 .HierarchicalDepthBufferEnable = false,
3477 .SurfaceFormat = view->depth_format,
3478 .SurfacePitch = view->depth_stride > 0 ? view->depth_stride - 1 : 0,
3479 .SurfaceBaseAddress = { view->bo, view->depth_offset },
3480 .Height = pass->render_area.extent.height - 1,
3481 .Width = pass->render_area.extent.width - 1,
3482 .LOD = 0,
3483 .Depth = 1 - 1,
3484 .MinimumArrayElement = 0,
3485 .DepthBufferObjectControlState = GEN8_MOCS,
3486 .RenderTargetViewExtent = 1 - 1,
3487 .SurfaceQPitch = 0);
3488
3489    /* Disable hierarchical depth buffers. */
3490 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HIER_DEPTH_BUFFER);
3491
3492 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STENCIL_BUFFER,
3493 .StencilBufferEnable = view->stencil_stride > 0,
3494 .StencilBufferObjectControlState = GEN8_MOCS,
3495 .SurfacePitch = view->stencil_stride > 0 ? view->stencil_stride - 1 : 0,
3496 .SurfaceBaseAddress = { view->bo, view->stencil_offset },
3497 .SurfaceQPitch = 0);
3498
3499 /* Clear the clear params. */
3500 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_CLEAR_PARAMS);
3501 }
3502
3503 void anv_CmdBeginRenderPass(
3504 VkCmdBuffer cmdBuffer,
3505 const VkRenderPassBegin* pRenderPassBegin)
3506 {
3507 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3508 struct anv_render_pass *pass = (struct anv_render_pass *) pRenderPassBegin->renderPass;
3509 struct anv_framebuffer *framebuffer =
3510 (struct anv_framebuffer *) pRenderPassBegin->framebuffer;
3511
3512 cmd_buffer->framebuffer = framebuffer;
3513
3514 cmd_buffer->descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
3515
3516 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DRAWING_RECTANGLE,
3517 .ClippedDrawingRectangleYMin = pass->render_area.offset.y,
3518 .ClippedDrawingRectangleXMin = pass->render_area.offset.x,
3519 .ClippedDrawingRectangleYMax =
3520 pass->render_area.offset.y + pass->render_area.extent.height - 1,
3521 .ClippedDrawingRectangleXMax =
3522 pass->render_area.offset.x + pass->render_area.extent.width - 1,
3523 .DrawingRectangleOriginY = 0,
3524 .DrawingRectangleOriginX = 0);
3525
3526 anv_cmd_buffer_emit_depth_stencil(cmd_buffer, pass);
3527
3528 anv_cmd_buffer_clear(cmd_buffer, pass);
3529 }
3530
3531 void anv_CmdEndRenderPass(
3532 VkCmdBuffer cmdBuffer,
3533 VkRenderPass renderPass)
3534 {
3535 /* Emit a flushing pipe control at the end of a pass. This is kind of a
3536 * hack but it ensures that render targets always actually get written.
3537 * Eventually, we should do flushing based on image format transitions
3538 * or something of that nature.
3539 */
3540 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *)cmdBuffer;
3541 anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
3542 .PostSyncOperation = NoWrite,
3543 .RenderTargetCacheFlushEnable = true,
3544 .InstructionCacheInvalidateEnable = true,
3545 .DepthCacheFlushEnable = true,
3546 .VFCacheInvalidationEnable = true,
3547 .TextureCacheInvalidationEnable = true,
3548 .CommandStreamerStallEnable = true);
3549 }
3550
3551 void vkCmdDbgMarkerBegin(
3552 VkCmdBuffer cmdBuffer,
3553 const char* pMarker)
3554 __attribute__ ((visibility ("default")));
3555
3556 void vkCmdDbgMarkerEnd(
3557 VkCmdBuffer cmdBuffer)
3558 __attribute__ ((visibility ("default")));
3559
3560 VkResult vkDbgSetObjectTag(
3561 VkDevice device,
3562 VkObject object,
3563 size_t tagSize,
3564 const void* pTag)
3565 __attribute__ ((visibility ("default")));
3566
3567
3568 void vkCmdDbgMarkerBegin(
3569 VkCmdBuffer cmdBuffer,
3570 const char* pMarker)
3571 {
3572 }
3573
3574 void vkCmdDbgMarkerEnd(
3575 VkCmdBuffer cmdBuffer)
3576 {
3577 }
3578
3579 VkResult vkDbgSetObjectTag(
3580 VkDevice device,
3581 VkObject object,
3582 size_t tagSize,
3583 const void* pTag)
3584 {
3585 return VK_SUCCESS;
3586 }