/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "private.h"
static int
anv_env_get_int(const char *name)
{
   const char *val = getenv(name);

   if (!val)
      return 0;

   return strtol(val, NULL, 0);
}
static VkResult
fill_physical_device(struct anv_physical_device *device,
                     struct anv_instance *instance,
                     const char *path)
{
   int fd;

   fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_UNAVAILABLE);

   device->instance = instance;

   device->chipset_id = anv_env_get_int("INTEL_DEVID_OVERRIDE");
   device->no_hw = false;
   if (device->chipset_id) {
      /* INTEL_DEVID_OVERRIDE implies INTEL_NO_HW. */
      device->no_hw = true;
   } else {
      device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   }
   if (!device->chipset_id)
      goto fail;

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id, -1);
   if (!device->info)
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
      goto fail;

   close(fd);

   return VK_SUCCESS;

 fail:
   close(fd);

   return vk_error(VK_ERROR_UNAVAILABLE);
}
static void *default_alloc(
    void*                                       pUserData,
    size_t                                      size,
    size_t                                      alignment,
    VkSystemAllocType                           allocType)
{
   return malloc(size);
}

static void default_free(
    void*                                       pUserData,
    void*                                       pMem)
{
   free(pMem);
}

static const VkAllocCallbacks default_alloc_callbacks = {
   .pUserData = NULL,
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};
VkResult VKAPI vkCreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;
   }
   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;

   instance->physicalDeviceCount = 0;
   result = fill_physical_device(&instance->physicalDevice,
                                 instance, "/dev/dri/renderD128");
   if (result == VK_SUCCESS)
      instance->physicalDeviceCount++;

   *pInstance = (VkInstance) instance;

   return VK_SUCCESS;
}
VkResult VKAPI vkDestroyInstance(
    VkInstance                                  _instance)
{
   struct anv_instance *instance = (struct anv_instance *) _instance;

   instance->pfnFree(instance->pAllocUserData, instance);

   return VK_SUCCESS;
}
VkResult VKAPI vkEnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   struct anv_instance *instance = (struct anv_instance *) _instance;

   if (*pPhysicalDeviceCount >= 1)
      pPhysicalDevices[0] = (VkPhysicalDevice) &instance->physicalDevice;
   *pPhysicalDeviceCount = instance->physicalDeviceCount;

   return VK_SUCCESS;
}
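
/* The timestamp numbers below come from an assumed GPU timestamp tick of
 * ns_per_tick = 80 ns: 1,000,000,000 ns/s / 80 ns = 12,500,000 ticks per
 * second, which is what ends up in properties->timestampFrequency.  This is
 * presumably a placeholder until the real per-generation tick period gets
 * wired up.
 */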
VkResult VKAPI vkGetPhysicalDeviceInfo(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceInfoType                    infoType,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   struct anv_physical_device *device = (struct anv_physical_device *) physicalDevice;
   VkPhysicalDeviceProperties *properties;
   VkPhysicalDevicePerformance *performance;
   VkPhysicalDeviceQueueProperties *queue_properties;
   VkPhysicalDeviceMemoryProperties *memory_properties;
   uint64_t ns_per_tick = 80;

   switch (infoType) {
   case VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES:
      properties = pData;
      assert(*pDataSize >= sizeof(*properties));
      *pDataSize = sizeof(*properties); /* Assuming we have to return the size of our struct. */

      properties->apiVersion = 1;
      properties->driverVersion = 1;
      properties->vendorId = 0x8086;
      properties->deviceId = device->chipset_id;
      properties->deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
      strcpy(properties->deviceName, device->name);
      properties->maxInlineMemoryUpdateSize = 0;
      properties->maxBoundDescriptorSets = 0;
      properties->maxThreadGroupSize = 0;
      properties->timestampFrequency = 1000 * 1000 * 1000 / ns_per_tick;
      properties->multiColorAttachmentClears = 0;
      properties->maxDescriptorSets = 2;
      properties->maxViewports = 16;
      properties->maxColorAttachments = 8;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE:
      performance = pData;
      assert(*pDataSize >= sizeof(*performance));
      *pDataSize = sizeof(*performance); /* Assuming we have to return the size of our struct. */

      performance->maxDeviceClock = 1.0;
      performance->aluPerClock = 1.0;
      performance->texPerClock = 1.0;
      performance->primsPerClock = 1.0;
      performance->pixelsPerClock = 1.0;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES:
      queue_properties = pData;
      assert(*pDataSize >= sizeof(*queue_properties));
      *pDataSize = sizeof(*queue_properties);

      queue_properties->queueFlags = 0;
      queue_properties->queueCount = 1;
      queue_properties->maxAtomicCounters = 0;
      queue_properties->supportsTimestamps = 0;
      queue_properties->maxMemReferences = 0;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES:
      memory_properties = pData;
      assert(*pDataSize >= sizeof(*memory_properties));
      *pDataSize = sizeof(*memory_properties);

      memory_properties->supportsMigration = false;
      memory_properties->supportsPinning = false;
      return VK_SUCCESS;

   default:
      return VK_UNSUPPORTED;
   }
}
void * vkGetProcAddr(
    VkPhysicalDevice                            physicalDevice,
    const char*                                 pName)
{
   return NULL;
}
static void
parse_debug_flags(struct anv_device *device)
{
   const char *debug, *p, *end;

   debug = getenv("INTEL_DEBUG");
   device->dump_aub = false;
   if (debug) {
      for (p = debug; *p; p = end + 1) {
         end = strchrnul(p, ',');
         if (end - p == 3 && memcmp(p, "aub", 3) == 0)
            device->dump_aub = true;
         if (end - p == 5 && memcmp(p, "no_hw", 5) == 0)
            device->no_hw = true;
         if (*end == '\0')
            break;
      }
   }
}
VkResult VKAPI vkCreateDevice(
    VkPhysicalDevice                            _physicalDevice,
    const VkDeviceCreateInfo*                   pCreateInfo,
    VkDevice*                                   pDevice)
{
   struct anv_physical_device *physicalDevice =
      (struct anv_physical_device *) _physicalDevice;
   struct anv_instance *instance = physicalDevice->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   device = instance->pfnAlloc(instance->pAllocUserData,
                               sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->no_hw = physicalDevice->no_hw;
   parse_debug_flags(device);

   device->instance = physicalDevice->instance;
   device->fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail_device;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail_fd;

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 2048);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   device->compiler = anv_compiler_create(device->fd);
   device->aub_writer = NULL;

   device->info = *physicalDevice->info;

   pthread_mutex_init(&device->mutex, NULL);

   anv_device_init_meta(device);

   *pDevice = (VkDevice) device;

   return VK_SUCCESS;

 fail_fd:
   close(device->fd);
 fail_device:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_UNAVAILABLE);
}
VkResult VKAPI vkDestroyDevice(
    VkDevice                                    _device)
{
   struct anv_device *device = (struct anv_device *) _device;

   anv_compiler_destroy(device->compiler);

   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);

   close(device->fd);

   if (device->aub_writer)
      anv_aub_writer_destroy(device->aub_writer);

   anv_device_free(device, device);

   return VK_SUCCESS;
}
VkResult VKAPI vkGetGlobalExtensionInfo(
    VkExtensionInfoType                         infoType,
    uint32_t                                    extensionIndex,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   uint32_t *count;

   switch (infoType) {
   case VK_EXTENSION_INFO_TYPE_COUNT:
      count = pData;
      assert(*pDataSize == 4);
      *count = 0;
      return VK_SUCCESS;

   case VK_EXTENSION_INFO_TYPE_PROPERTIES:
      return vk_error(VK_ERROR_INVALID_EXTENSION);

   default:
      return VK_UNSUPPORTED;
   }
}
VkResult VKAPI vkGetPhysicalDeviceExtensionInfo(
    VkPhysicalDevice                            physicalDevice,
    VkExtensionInfoType                         infoType,
    uint32_t                                    extensionIndex,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   uint32_t *count;

   switch (infoType) {
   case VK_EXTENSION_INFO_TYPE_COUNT:
      count = pData;
      assert(*pDataSize == 4);
      *count = 0;
      return VK_SUCCESS;

   case VK_EXTENSION_INFO_TYPE_PROPERTIES:
      return vk_error(VK_ERROR_INVALID_EXTENSION);

   default:
      return VK_UNSUPPORTED;
   }
}
VkResult VKAPI vkEnumerateLayers(
    VkPhysicalDevice                            physicalDevice,
    size_t                                      maxStringSize,
    size_t*                                     pLayerCount,
    char* const*                                pOutLayers,
    void*                                       pReserved)
{
   *pLayerCount = 0;

   return VK_SUCCESS;
}
VkResult VKAPI vkGetDeviceQueue(
    VkDevice                                    _device,
    uint32_t                                    queueNodeIndex,
    uint32_t                                    queueIndex,
    VkQueue*                                    pQueue)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_queue *queue;

   /* FIXME: Should allocate these at device create time. */

   queue = anv_device_alloc(device, sizeof(*queue), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (queue == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   queue->device = device;
   queue->pool = &device->surface_state_pool;

   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
   *(uint32_t *)queue->completed_serial.map = 0;
   queue->next_serial = 1;

   *pQueue = (VkQueue) queue;

   return VK_SUCCESS;
}
static const uint32_t BATCH_SIZE = 8192;
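
/* A batch is a fixed-size (BATCH_SIZE bytes) GEM buffer object that is
 * mapped once at init time; batch->next tracks the write pointer into that
 * map, and the two reloc lists (cmd_relocs for pointers embedded in the
 * batch itself, surf_relocs for pointers in surface state) accumulate the
 * relocation entries that execbuf2 will eventually need.  Roughly, the
 * lifecycle looks like:
 *
 *    struct anv_batch batch;
 *    anv_batch_init(&batch, device);                  // alloc + mmap the bo
 *    uint32_t *p = anv_batch_emit_dwords(&batch, 2);  // reserve space
 *    anv_batch_reset(&batch);                         // rewind for reuse
 *    anv_batch_finish(&batch, device);                // munmap + close
 */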
VkResult
anv_batch_init(struct anv_batch *batch, struct anv_device *device)
{
   VkResult result;

   result = anv_bo_init_new(&batch->bo, device, BATCH_SIZE);
   if (result != VK_SUCCESS)
      return result;

   batch->bo.map =
      anv_gem_mmap(device, batch->bo.gem_handle, 0, BATCH_SIZE);
   if (batch->bo.map == NULL) {
      anv_gem_close(device, batch->bo.gem_handle);
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
   }

   batch->cmd_relocs.num_relocs = 0;
   batch->surf_relocs.num_relocs = 0;
   batch->next = batch->bo.map;

   return VK_SUCCESS;
}
void
anv_batch_finish(struct anv_batch *batch, struct anv_device *device)
{
   anv_gem_munmap(batch->bo.map, BATCH_SIZE);
   anv_gem_close(device, batch->bo.gem_handle);
}
void
anv_batch_reset(struct anv_batch *batch)
{
   batch->next = batch->bo.map;
   batch->cmd_relocs.num_relocs = 0;
   batch->surf_relocs.num_relocs = 0;
}
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   void *p = batch->next;

   batch->next += num_dwords * 4;

   return p;
}
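
/* Each reloc list entry is a struct drm_i915_gem_relocation_entry telling
 * the kernel "at byte `offset` inside this object there is a pointer to
 * `target_handle` plus `delta`".  presumed_offset records where userspace
 * believed the target bo to live when it wrote the pointer; if that guess
 * still matches at execbuf time the kernel can skip patching entirely (see
 * the I915_EXEC_NO_RELOC handling further down in this file).
 */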
static void
anv_reloc_list_append(struct anv_reloc_list *list,
                      struct anv_reloc_list *other, uint32_t offset)
{
   uint32_t i, count;

   count = list->num_relocs;
   memcpy(&list->relocs[count], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[count], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));
   for (i = 0; i < other->num_relocs; i++)
      list->relocs[i + count].offset += offset;

   count += other->num_relocs;
   list->num_relocs = count;
}
static uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
                   uint32_t offset,
                   struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   uint32_t index;

   assert(list->num_relocs < ANV_BATCH_MAX_RELOCS);

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;

   return target_bo->offset + delta;
}
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->bo.map;
   memcpy(batch->next, other->bo.map, size);

   offset = batch->next - batch->bo.map;
   anv_reloc_list_append(&batch->cmd_relocs, &other->cmd_relocs, offset);
   anv_reloc_list_append(&batch->surf_relocs, &other->surf_relocs, offset);

   batch->next += size;
}
uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(&batch->cmd_relocs,
                             location - batch->bo.map, bo, delta);
}
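
/* After a successful execbuffer the kernel writes the final GPU offset of
 * every object back into the exec2_objects array.  vkQueueSubmit below
 * copies those offsets back into the anv_bo structs, so the next batch can
 * use them as presumed offsets and keep relocation processing a no-op on
 * the kernel side as long as nothing moves.
 */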
VkResult VKAPI vkQueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    cmdBufferCount,
    const VkCmdBuffer*                          pCmdBuffers,
    VkFence                                     fence)
{
   struct anv_queue *queue = (struct anv_queue *) _queue;
   struct anv_device *device = queue->device;
   int ret;

   for (uint32_t i = 0; i < cmdBufferCount; i++) {
      struct anv_cmd_buffer *cmd_buffer =
         (struct anv_cmd_buffer *) pCmdBuffers[i];

      if (device->dump_aub)
         anv_cmd_buffer_dump(cmd_buffer);

      if (!device->no_hw) {
         ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf);
         if (ret != 0)
            return vk_error(VK_ERROR_UNKNOWN);

         for (uint32_t i = 0; i < cmd_buffer->bo_count; i++)
            cmd_buffer->exec2_bos[i]->offset = cmd_buffer->exec2_objects[i].offset;
      } else {
         *(uint32_t *)queue->completed_serial.map = cmd_buffer->serial;
      }
   }

   return VK_SUCCESS;
}
VkResult VKAPI vkQueueAddMemReferences(
    VkQueue                                     queue,
    uint32_t                                    count,
    const VkDeviceMemory*                       pMems)
{
   return VK_SUCCESS;
}

VkResult vkQueueRemoveMemReferences(
    VkQueue                                     queue,
    uint32_t                                    count,
    const VkDeviceMemory*                       pMems)
{
   return VK_SUCCESS;
}
VkResult VKAPI vkQueueWaitIdle(
    VkQueue                                     _queue)
{
   struct anv_queue *queue = (struct anv_queue *) _queue;

   return vkDeviceWaitIdle((VkDevice) queue->device);
}
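
/* vkDeviceWaitIdle works by submitting a minimal batch -- just
 * MI_BATCH_BUFFER_END followed by a MI_NOOP for even-dword padding --
 * through the same hardware context and then blocking in anv_gem_wait()
 * on the backing bo.  Since the ring executes in order, the empty batch
 * retiring implies all previously submitted work has retired too.
 */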
VkResult VKAPI vkDeviceWaitIdle(
    VkDevice                                    _device)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
   bo = &device->dynamic_state_pool.block_pool->bo;
   batch.next = state.map;
   anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN8_MI_NOOP);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (!device->no_hw) {
      ret = anv_gem_execbuffer(device, &execbuf);
      if (ret != 0) {
         result = vk_error(VK_ERROR_UNKNOWN);
         goto fail;
      }

      timeout = INT64_MAX;
      ret = anv_gem_wait(device, bo->gem_handle, &timeout);
      if (ret != 0) {
         result = vk_error(VK_ERROR_UNKNOWN);
         goto fail;
      }
   }

   anv_state_pool_free(&device->dynamic_state_pool, state);

   return VK_SUCCESS;

 fail:
   anv_state_pool_free(&device->dynamic_state_pool, state);

   return result;
}
void *
anv_device_alloc(struct anv_device *device,
                 size_t size, size_t alignment,
                 VkSystemAllocType allocType)
{
   return device->instance->pfnAlloc(device->instance->pAllocUserData,
                                     size, alignment, allocType);
}
void
anv_device_free(struct anv_device *device, void *mem)
{
   return device->instance->pfnFree(device->instance->pAllocUserData,
                                    mem);
}
VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;

   return VK_SUCCESS;
}
VkResult VKAPI vkAllocMemory(
    VkDevice                                    _device,
    const VkMemoryAllocInfo*                    pAllocInfo,
    VkDeviceMemory*                             pMem)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = (VkDeviceMemory) mem;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, mem);

   return result;
}
VkResult VKAPI vkFreeMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);

   return VK_SUCCESS;
}
VkResult VKAPI vkSetMemoryPriority(
    VkDevice                                    device,
    VkDeviceMemory                              mem,
    VkMemoryPriority                            priority)
{
   return VK_SUCCESS;
}
VkResult VKAPI vkMapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}
VkResult VKAPI vkUnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   anv_gem_munmap(mem->map, mem->map_size);

   return VK_SUCCESS;
}
VkResult VKAPI vkFlushMappedMemory(
    VkDevice                                    device,
    VkDeviceMemory                              mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}
VkResult VKAPI vkPinSystemMemory(
    VkDevice                                    device,
    const void*                                 pSysMem,
    size_t                                      memSize,
    VkDeviceMemory*                             pMem)
{
   return VK_SUCCESS;
}
VkResult VKAPI vkGetMultiDeviceCompatibility(
    VkPhysicalDevice                            physicalDevice0,
    VkPhysicalDevice                            physicalDevice1,
    VkPhysicalDeviceCompatibilityInfo*          pInfo)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenSharedMemory(
    VkDevice                                    device,
    const VkMemoryOpenInfo*                     pOpenInfo,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenSharedSemaphore(
    VkDevice                                    device,
    const VkSemaphoreOpenInfo*                  pOpenInfo,
    VkSemaphore*                                pSemaphore)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenPeerMemory(
    VkDevice                                    device,
    const VkPeerMemoryOpenInfo*                 pOpenInfo,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenPeerImage(
    VkDevice                                    device,
    const VkPeerImageOpenInfo*                  pOpenInfo,
    VkImage*                                    pImage,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}
static VkResult
anv_instance_destructor(struct anv_device *device,
                        VkObject object)
{
   return vkDestroyInstance(object);
}

static VkResult
anv_noop_destructor(struct anv_device *device,
                    VkObject object)
{
   return VK_SUCCESS;
}

static VkResult
anv_device_destructor(struct anv_device *device,
                      VkObject object)
{
   return vkDestroyDevice(object);
}

static VkResult
anv_cmd_buffer_destructor(struct anv_device *device,
                          VkObject object)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) object;

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_batch_finish(&cmd_buffer->batch, device);
   anv_device_free(device, cmd_buffer->exec2_objects);
   anv_device_free(device, cmd_buffer->exec2_bos);
   anv_device_free(device, cmd_buffer);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_destructor(struct anv_device *device,
                        VkObject object)
{
   struct anv_pipeline *pipeline = (struct anv_pipeline *) object;

   return anv_pipeline_destroy(pipeline);
}

static VkResult
anv_free_destructor(struct anv_device *device,
                    VkObject object)
{
   anv_device_free(device, (void *) object);

   return VK_SUCCESS;
}
static VkResult (*anv_object_destructors[])(struct anv_device *device,
                                            VkObject object) = {
   [VK_OBJECT_TYPE_INSTANCE] = anv_instance_destructor,
   [VK_OBJECT_TYPE_PHYSICAL_DEVICE] = anv_noop_destructor,
   [VK_OBJECT_TYPE_DEVICE] = anv_device_destructor,
   [VK_OBJECT_TYPE_QUEUE] = anv_noop_destructor,
   [VK_OBJECT_TYPE_COMMAND_BUFFER] = anv_cmd_buffer_destructor,
   [VK_OBJECT_TYPE_PIPELINE] = anv_pipeline_destructor,
   [VK_OBJECT_TYPE_SHADER] = anv_free_destructor,
   [VK_OBJECT_TYPE_BUFFER] = anv_free_destructor,
   [VK_OBJECT_TYPE_IMAGE] = anv_free_destructor,
   [VK_OBJECT_TYPE_RENDER_PASS] = anv_free_destructor
};
VkResult VKAPI vkDestroyObject(
    VkDevice                                    _device,
    VkObjectType                                objType,
    VkObject                                    object)
{
   struct anv_device *device = (struct anv_device *) _device;

   assert(objType < ARRAY_SIZE(anv_object_destructors) &&
          anv_object_destructors[objType] != NULL);

   return anv_object_destructors[objType](device, object);
}
static void
fill_memory_requirements(
    VkObjectType                                objType,
    VkObject                                    object,
    VkMemoryRequirements*                       memory_requirements)
{
   struct anv_buffer *buffer;
   struct anv_image *image;

   memory_requirements->memPropsAllowed =
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT |
      /* VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT | */
      VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT |
      VK_MEMORY_PROPERTY_PREFER_HOST_LOCAL |
      VK_MEMORY_PROPERTY_SHAREABLE_BIT;

   memory_requirements->memPropsRequired = 0;

   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
      buffer = (struct anv_buffer *) object;
      memory_requirements->size = buffer->size;
      memory_requirements->alignment = 16;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      image = (struct anv_image *) object;
      memory_requirements->size = image->size;
      memory_requirements->alignment = image->alignment;
      break;
   default:
      memory_requirements->size = 0;
      break;
   }
}
VkResult VKAPI vkGetObjectInfo(
    VkDevice                                    _device,
    VkObjectType                                objType,
    VkObject                                    object,
    VkObjectInfoType                            infoType,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   VkMemoryRequirements memory_requirements;

   switch (infoType) {
   case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
      fill_memory_requirements(objType, object, &memory_requirements);
      memcpy(pData, &memory_requirements,
             MIN2(*pDataSize, sizeof(memory_requirements)));
      *pDataSize = sizeof(memory_requirements);
      return VK_SUCCESS;

   case VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
   default:
      return VK_UNSUPPORTED;
   }
}
VkResult VKAPI vkQueueBindObjectMemory(
    VkQueue                                     queue,
    VkObjectType                                objType,
    VkObject                                    object,
    uint32_t                                    allocationIdx,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                memOffset)
{
   struct anv_buffer *buffer;
   struct anv_image *image;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
      buffer = (struct anv_buffer *) object;
      buffer->bo = &mem->bo;
      buffer->offset = memOffset;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      image = (struct anv_image *) object;
      image->bo = &mem->bo;
      image->offset = memOffset;
      break;
   default:
      break;
   }

   return VK_SUCCESS;
}
VkResult VKAPI vkQueueBindObjectMemoryRange(
    VkQueue                                     queue,
    VkObjectType                                objType,
    VkObject                                    object,
    uint32_t                                    allocationIdx,
    VkDeviceSize                                rangeOffset,
    VkDeviceSize                                rangeSize,
    VkDeviceMemory                              mem,
    VkDeviceSize                                memOffset)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult vkQueueBindImageMemoryRange(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    allocationIdx,
    const VkImageMemoryBindInfo*                pBindInfo,
    VkDeviceMemory                              mem,
    VkDeviceSize                                memOffset)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkCreateFence(
    VkDevice                                    device,
    const VkFenceCreateInfo*                    pCreateInfo,
    VkFence*                                    pFence)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkResetFences(
    VkDevice                                    device,
    uint32_t                                    fenceCount,
    VkFence*                                    pFences)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkGetFenceStatus(
    VkDevice                                    device,
    VkFence                                     fence)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkWaitForFences(
    VkDevice                                    device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    bool32_t                                    waitAll,
    uint64_t                                    timeout)
{
   stub_return(VK_UNSUPPORTED);
}

// Queue semaphore functions

VkResult VKAPI vkCreateSemaphore(
    VkDevice                                    device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    VkSemaphore*                                pSemaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkQueueSignalSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkQueueWaitSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

// Event functions

VkResult VKAPI vkCreateEvent(
    VkDevice                                    device,
    const VkEventCreateInfo*                    pCreateInfo,
    VkEvent*                                    pEvent)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkGetEventStatus(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkSetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkResetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}
// Query functions

struct anv_query_pool {
   VkQueryType                                  type;
   struct anv_bo                                bo;
};
VkResult VKAPI vkCreateQueryPool(
    VkDevice                                    _device,
    const VkQueryPoolCreateInfo*                pCreateInfo,
    VkQueryPool*                                pQueryPool)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_query_pool *pool;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);

   pool = anv_device_alloc(device, sizeof(*pool), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->type = pCreateInfo->queryType;
   result = anv_bo_init_new(&pool->bo, device, pCreateInfo->slots * 16);
   if (result != VK_SUCCESS)
      goto fail;

   *pQueryPool = (VkQueryPool) pool;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, pool);

   return result;
}
VkResult VKAPI vkGetQueryPoolResults(
    VkDevice                                    device,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount,
    size_t*                                     pDataSize,
    void*                                       pData,
    VkQueryResultFlags                          flags)
{
   stub_return(VK_UNSUPPORTED);
}

// Format capabilities

VkResult VKAPI vkGetFormatInfo(
    VkDevice                                    device,
    VkFormat                                    format,
    VkFormatInfoType                            infoType,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   stub_return(VK_UNSUPPORTED);
}
// Buffer functions

VkResult VKAPI vkCreateBuffer(
    VkDevice                                    _device,
    const VkBufferCreateInfo*                   pCreateInfo,
    VkBuffer*                                   pBuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->bo = NULL;
   buffer->offset = 0;

   *pBuffer = (VkBuffer) buffer;

   return VK_SUCCESS;
}
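
/* For SURFTYPE_BUFFER surfaces, gen8 spreads the element count across the
 * Width/Height/Depth fields: bits 6:0 go in Width, bits 20:7 in Height and
 * bits 26:21 in Depth.  For example, with num_elements = 100000 (0x186a0):
 *
 *    Width  = 0x186a0 & 0x7f          = 0x20
 *    Height = (0x186a0 >> 7) & 0x3fff = 0x30d
 *    Depth  = (0x186a0 >> 21) & 0x3f  = 0
 *
 * which is exactly what the struct initializer in vkCreateBufferView
 * computes below.
 */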
// Buffer view functions

VkResult VKAPI vkCreateBufferView(
    VkDevice                                    _device,
    const VkBufferViewCreateInfo*               pCreateInfo,
    VkBufferView*                               pView)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_buffer *buffer = (struct anv_buffer *) pCreateInfo->buffer;
   struct anv_surface_view *view;
   const struct anv_format *format;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   view = anv_device_alloc(device, sizeof(*view), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (view == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   view->bo = buffer->bo;
   view->offset = buffer->offset + pCreateInfo->offset;
   view->surface_state =
      anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
   view->format = pCreateInfo->format;

   format = anv_format_for_vk_format(pCreateInfo->format);
   /* This assumes RGBA float format. */
   uint32_t stride = 4;
   uint32_t num_elements = pCreateInfo->range / stride;
   struct GEN8_RENDER_SURFACE_STATE surface_state = {
      .SurfaceType = SURFTYPE_BUFFER,
      .SurfaceArray = false,
      .SurfaceFormat = format->format,
      .SurfaceVerticalAlignment = VALIGN4,
      .SurfaceHorizontalAlignment = HALIGN4,
      .TileMode = LINEAR,
      .VerticalLineStride = 0,
      .VerticalLineStrideOffset = 0,
      .SamplerL2BypassModeDisable = true,
      .RenderCacheReadWriteMode = WriteOnlyCache,
      .MemoryObjectControlState = 0, /* FIXME: MOCS */
      .Height = (num_elements >> 7) & 0x3fff,
      .Width = num_elements & 0x7f,
      .Depth = (num_elements >> 21) & 0x3f,
      .SurfacePitch = stride - 1,
      .MinimumArrayElement = 0,
      .NumberofMultisamples = MULTISAMPLECOUNT_1,
      .AuxiliarySurfaceMode = AUX_NONE,
      .RedClearColor = 0,
      .GreenClearColor = 0,
      .BlueClearColor = 0,
      .AlphaClearColor = 0,
      .ShaderChannelSelectRed = SCS_RED,
      .ShaderChannelSelectGreen = SCS_GREEN,
      .ShaderChannelSelectBlue = SCS_BLUE,
      .ShaderChannelSelectAlpha = SCS_ALPHA,
      .ResourceMinLOD = 0,
      /* FIXME: We assume that the image must be bound at this time. */
      .SurfaceBaseAddress = { NULL, view->offset },
   };

   GEN8_RENDER_SURFACE_STATE_pack(NULL, view->surface_state.map, &surface_state);

   *pView = (VkBufferView) view;

   return VK_SUCCESS;
}
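
/* The sampler LOD controls (TextureLODBias, MinLOD, MaxLOD) are fixed-point
 * hardware fields, which is presumably why the Vulkan float values are
 * multiplied by 256 below -- i.e. treated as an x.8 fixed-point conversion
 * -- before being packed into SAMPLER_STATE.
 */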
// Sampler functions

VkResult VKAPI vkCreateSampler(
    VkDevice                                    _device,
    const VkSamplerCreateInfo*                  pCreateInfo,
    VkSampler*                                  pSampler)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = anv_device_alloc(device, sizeof(*sampler), 8,
                              VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!sampler)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   static const uint32_t vk_to_gen_tex_filter[] = {
      [VK_TEX_FILTER_NEAREST] = MAPFILTER_NEAREST,
      [VK_TEX_FILTER_LINEAR] = MAPFILTER_LINEAR
   };

   static const uint32_t vk_to_gen_mipmap_mode[] = {
      [VK_TEX_MIPMAP_MODE_BASE] = MIPFILTER_NONE,
      [VK_TEX_MIPMAP_MODE_NEAREST] = MIPFILTER_NEAREST,
      [VK_TEX_MIPMAP_MODE_LINEAR] = MIPFILTER_LINEAR
   };

   static const uint32_t vk_to_gen_tex_address[] = {
      [VK_TEX_ADDRESS_WRAP] = TCM_WRAP,
      [VK_TEX_ADDRESS_MIRROR] = TCM_MIRROR,
      [VK_TEX_ADDRESS_CLAMP] = TCM_CLAMP,
      [VK_TEX_ADDRESS_MIRROR_ONCE] = TCM_MIRROR_ONCE,
      [VK_TEX_ADDRESS_CLAMP_BORDER] = TCM_CLAMP_BORDER,
   };

   static const uint32_t vk_to_gen_compare_op[] = {
      [VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER,
      [VK_COMPARE_OP_LESS] = PREFILTEROPLESS,
      [VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL,
      [VK_COMPARE_OP_LESS_EQUAL] = PREFILTEROPLEQUAL,
      [VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER,
      [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL,
      [VK_COMPARE_OP_GREATER_EQUAL] = PREFILTEROPGEQUAL,
      [VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS,
   };

   if (pCreateInfo->maxAnisotropy > 0)
      anv_finishme("missing support for anisotropic filtering");

   struct GEN8_SAMPLER_STATE sampler_state = {
      .SamplerDisable = false,
      .TextureBorderColorMode = DX10OGL,
      .LODPreClampMode = 0,
      .BaseMipLevel = 0,
      .MipModeFilter = vk_to_gen_mipmap_mode[pCreateInfo->mipMode],
      .MagModeFilter = vk_to_gen_tex_filter[pCreateInfo->magFilter],
      .MinModeFilter = vk_to_gen_tex_filter[pCreateInfo->minFilter],
      .TextureLODBias = pCreateInfo->mipLodBias * 256,
      .AnisotropicAlgorithm = EWAApproximation,
      .MinLOD = pCreateInfo->minLod * 256,
      .MaxLOD = pCreateInfo->maxLod * 256,
      .ChromaKeyEnable = 0,
      .ChromaKeyIndex = 0,
      .ChromaKeyMode = 0,
      .ShadowFunction = vk_to_gen_compare_op[pCreateInfo->compareOp],
      .CubeSurfaceControlMode = 0,
      .IndirectStatePointer = 0,
      .LODClampMagnificationMode = MIPNONE,
      .MaximumAnisotropy = 0,
      .RAddressMinFilterRoundingEnable = 0,
      .RAddressMagFilterRoundingEnable = 0,
      .VAddressMinFilterRoundingEnable = 0,
      .VAddressMagFilterRoundingEnable = 0,
      .UAddressMinFilterRoundingEnable = 0,
      .UAddressMagFilterRoundingEnable = 0,
      .TrilinearFilterQuality = 0,
      .NonnormalizedCoordinateEnable = 0,
      .TCXAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressU],
      .TCYAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressV],
      .TCZAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressW],
   };

   GEN8_SAMPLER_STATE_pack(NULL, sampler->state, &sampler_state);

   *pSampler = (VkSampler) sampler;

   return VK_SUCCESS;
}
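
/* A descriptor set layout here is a flat array of uint32_t entries tacked
 * onto the end of the struct.  For every shader stage it records two spans
 * inside that array -- surface_start/surface_count and
 * sampler_start/sampler_count -- and each entry maps a binding-table or
 * sampler slot back to the descriptor index that feeds it.
 * vkCreateDescriptorSetLayout builds this in two passes: first it sizes
 * the per-stage counts, then it walks the bindings again writing the
 * slot -> descriptor mapping.  Combined image/sampler bindings
 * intentionally fall through so they are counted in both the sampler and
 * surface spans.
 */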
// Descriptor set functions

VkResult VKAPI vkCreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayout*                      pSetLayout)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t sampler_count[VK_NUM_SHADER_STAGE] = { 0, };
   uint32_t surface_count[VK_NUM_SHADER_STAGE] = { 0, };
   uint32_t num_dynamic_buffers = 0;
   uint32_t count = 0;
   uint32_t s;

   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            sampler_count[s] += pCreateInfo->pBinding[i].count;
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            sampler_count[s] += pCreateInfo->pBinding[i].count;

         /* fall through */

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            surface_count[s] += pCreateInfo->pBinding[i].count;
         break;

      default:
         break;
      }

      count += pCreateInfo->pBinding[i].count;
   }

   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         num_dynamic_buffers++;
         break;

      default:
         break;
      }
   }

   uint32_t sampler_total = 0;
   uint32_t surface_total = 0;
   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      sampler_total += sampler_count[s];
      surface_total += surface_count[s];
   }

   size_t size = sizeof(*set_layout) +
      (sampler_total + surface_total) * sizeof(uint32_t);
   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   set_layout->num_dynamic_buffers = num_dynamic_buffers;
   set_layout->count = count;

   uint32_t *p = set_layout->entries;
   uint32_t *sampler[VK_NUM_SHADER_STAGE];
   uint32_t *surface[VK_NUM_SHADER_STAGE];
   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      set_layout->stage[s].surface_count = surface_count[s];
      set_layout->stage[s].surface_start = surface[s] = p;
      p += surface_count[s];
      set_layout->stage[s].sampler_count = sampler_count[s];
      set_layout->stage[s].sampler_start = sampler[s] = p;
      p += sampler_count[s];
   }

   uint32_t descriptor = 0;
   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++)
               *(sampler[s])++ = descriptor + j;
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++)
               *(sampler[s])++ = descriptor + j;

         /* fall through */

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++) {
               *(surface[s])++ = descriptor + j;
            }
         break;

      default:
         break;
      }

      descriptor += pCreateInfo->pBinding[i].count;
   }

   *pSetLayout = (VkDescriptorSetLayout) set_layout;

   return VK_SUCCESS;
}
vkBeginDescriptorPoolUpdate(
1542 VkDescriptorUpdateMode updateMode
)
1544 stub_return(VK_UNSUPPORTED
);
1547 VkResult VKAPI
vkEndDescriptorPoolUpdate(
1551 stub_return(VK_UNSUPPORTED
);
1554 VkResult VKAPI
vkCreateDescriptorPool(
1556 VkDescriptorPoolUsage poolUsage
,
1558 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
1559 VkDescriptorPool
* pDescriptorPool
)
1561 stub_return(VK_UNSUPPORTED
);
1564 VkResult VKAPI
vkResetDescriptorPool(
1566 VkDescriptorPool descriptorPool
)
1568 stub_return(VK_UNSUPPORTED
);
VkResult VKAPI vkAllocDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorSetUsage                        setUsage,
    uint32_t                                    count,
    const VkDescriptorSetLayout*                pSetLayouts,
    VkDescriptorSet*                            pDescriptorSets,
    uint32_t*                                   pCount)
{
   struct anv_device *device = (struct anv_device *) _device;
   const struct anv_descriptor_set_layout *layout;
   struct anv_descriptor_set *set;
   size_t size;

   for (uint32_t i = 0; i < count; i++) {
      layout = (struct anv_descriptor_set_layout *) pSetLayouts[i];
      size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
      set = anv_device_alloc(device, size, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      if (!set) {
         *pCount = i;
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      pDescriptorSets[i] = (VkDescriptorSet) set;
   }

   *pCount = count;

   return VK_UNSUPPORTED;
}
void VKAPI vkClearDescriptorSets(
    VkDevice                                    device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
}
void VKAPI vkUpdateDescriptors(
    VkDevice                                    _device,
    VkDescriptorSet                             descriptorSet,
    uint32_t                                    updateCount,
    const void**                                ppUpdateArray)
{
   struct anv_descriptor_set *set = (struct anv_descriptor_set *) descriptorSet;
   VkUpdateSamplers *update_samplers;
   VkUpdateSamplerTextures *update_sampler_textures;
   VkUpdateImages *update_images;
   VkUpdateBuffers *update_buffers;
   VkUpdateAsCopy *update_as_copy;

   for (uint32_t i = 0; i < updateCount; i++) {
      const struct anv_common *common = ppUpdateArray[i];

      switch (common->sType) {
      case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
         update_samplers = (VkUpdateSamplers *) common;

         for (uint32_t j = 0; j < update_samplers->count; j++) {
            set->descriptors[update_samplers->binding + j].sampler =
               (struct anv_sampler *) update_samplers->pSamplers[j];
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
         /* FIXME: Shouldn't this be *_UPDATE_SAMPLER_IMAGES? */
         update_sampler_textures = (VkUpdateSamplerTextures *) common;

         for (uint32_t j = 0; j < update_sampler_textures->count; j++) {
            set->descriptors[update_sampler_textures->binding + j].view =
               (struct anv_surface_view *)
               update_sampler_textures->pSamplerImageViews[j].pImageView->view;
            set->descriptors[update_sampler_textures->binding + j].sampler =
               (struct anv_sampler *)
               update_sampler_textures->pSamplerImageViews[j].sampler;
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
         update_images = (VkUpdateImages *) common;

         for (uint32_t j = 0; j < update_images->count; j++) {
            set->descriptors[update_images->binding + j].view =
               (struct anv_surface_view *) update_images->pImageViews[j].view;
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
         update_buffers = (VkUpdateBuffers *) common;

         for (uint32_t j = 0; j < update_buffers->count; j++) {
            set->descriptors[update_buffers->binding + j].view =
               (struct anv_surface_view *) update_buffers->pBufferViews[j].view;
         }
         /* FIXME: descriptor arrays? */
         break;

      case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
         update_as_copy = (VkUpdateAsCopy *) common;
         (void) update_as_copy;
         break;

      default:
         break;
      }
   }
}
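
/* The SF_CLIP viewport packed below carries the usual NDC-to-screen
 * transform: x_screen = m00 * x_ndc + m30, with m00 = width / 2 and
 * m30 = originX + width / 2.  So for originX = 0, width = 1920, an x_ndc
 * of -1.0 maps to 0 and +1.0 maps to 1920 (and likewise for y and depth).
 * Scissor math is done through clamp_int64() in 64-bit so that
 * offset + extent cannot wrap around before being clamped to the 16-bit
 * hardware range.
 */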
// State object functions

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}
VkResult VKAPI vkCreateDynamicViewportState(
    VkDevice                                    _device,
    const VkDynamicVpStateCreateInfo*           pCreateInfo,
    VkDynamicVpState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_vp_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   unsigned count = pCreateInfo->viewportAndScissorCount;
   state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
                                            count * 64, 64);
   state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
                                       count * 32, 32);
   state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
                                         count * 32, 32);

   for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
      const VkViewport *vp = &pCreateInfo->pViewports[i];
      const VkRect *s = &pCreateInfo->pScissors[i];

      struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
         .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
         .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
         .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->originX,
         .XMaxViewPort = vp->originX + vp->width - 1,
         .YMinViewPort = vp->originY,
         .YMaxViewPort = vp->originY + vp->height - 1,
      };

      struct GEN8_CC_VIEWPORT cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips. In case clip x, y, width height are all
       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
       * what we want. Just special case empty clips and produce a canonical
       * empty clip. */
      static const struct GEN8_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN8_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
      GEN8_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 32, &cc_viewport);

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
      } else {
         GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
      }
   }

   *pState = (VkDynamicVpState) state;

   return VK_SUCCESS;
}
VkResult VKAPI vkCreateDynamicRasterState(
    VkDevice                                    _device,
    const VkDynamicRsStateCreateInfo*           pCreateInfo,
    VkDynamicRsState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_rs_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /*
    * float depthBiasClamp;
    * float slopeScaledDepthBias;
    * float pointFadeThreshold;
    * // optional (GL45) - Size of point fade threshold
    */

   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      .LineWidth = pCreateInfo->lineWidth,
      .PointWidth = pCreateInfo->pointSize,
   };

   GEN8_3DSTATE_SF_pack(NULL, state->state_sf, &sf);

   *pState = (VkDynamicRsState) state;

   return VK_SUCCESS;
}
VkResult VKAPI vkCreateDynamicColorBlendState(
    VkDevice                                    _device,
    const VkDynamicCbStateCreateInfo*           pCreateInfo,
    VkDynamicCbState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_cb_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *pState = (VkDynamicCbState) state;

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateDynamicDepthStencilState(
    VkDevice                                    device,
    const VkDynamicDsStateCreateInfo*           pCreateInfo,
    VkDynamicDsState*                           pState)
{
   stub_return(VK_UNSUPPORTED);
}
// Command buffer functions

VkResult VKAPI vkCreateCommandBuffer(
    VkDevice                                    _device,
    const VkCmdBufferCreateInfo*                pCreateInfo,
    VkCmdBuffer*                                pCmdBuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->device = device;
   cmd_buffer->rs_state = NULL;
   cmd_buffer->vp_state = NULL;
   memset(&cmd_buffer->default_bindings, 0, sizeof(cmd_buffer->default_bindings));
   cmd_buffer->bindings = &cmd_buffer->default_bindings;

   result = anv_batch_init(&cmd_buffer->batch, device);
   if (result != VK_SUCCESS)
      goto fail;

   cmd_buffer->exec2_objects =
      anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_objects[0]), 8,
                       VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer->exec2_objects == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_batch;
   }

   cmd_buffer->exec2_bos =
      anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_bos[0]), 8,
                       VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer->exec2_bos == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_exec2_objects;
   }

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   cmd_buffer->dirty = 0;
   cmd_buffer->vb_dirty = 0;

   *pCmdBuffer = (VkCmdBuffer) cmd_buffer;

   return VK_SUCCESS;

 fail_exec2_objects:
   anv_device_free(device, cmd_buffer->exec2_objects);
 fail_batch:
   anv_batch_finish(&cmd_buffer->batch, device);
 fail:
   anv_device_free(device, cmd_buffer);

   return result;
}
VkResult VKAPI vkBeginCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    const VkCmdBufferBeginInfo*                 pBeginInfo)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_device *device = cmd_buffer->device;

   anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT,
                  .PipelineSelection = _3D);
   anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_SIP);

   anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_BASE_ADDRESS,
                  .GeneralStateBaseAddress = { NULL, 0 },
                  .GeneralStateBaseAddressModifyEnable = true,
                  .GeneralStateBufferSize = 0xfffff,
                  .GeneralStateBufferSizeModifyEnable = true,

                  .SurfaceStateBaseAddress = { &device->surface_state_block_pool.bo, 0 },
                  .SurfaceStateMemoryObjectControlState = 0, /* FIXME: MOCS */
                  .SurfaceStateBaseAddressModifyEnable = true,

                  .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
                  .DynamicStateBaseAddressModifyEnable = true,
                  .DynamicStateBufferSize = 0xfffff,
                  .DynamicStateBufferSizeModifyEnable = true,

                  .IndirectObjectBaseAddress = { NULL, 0 },
                  .IndirectObjectBaseAddressModifyEnable = true,
                  .IndirectObjectBufferSize = 0xfffff,
                  .IndirectObjectBufferSizeModifyEnable = true,

                  .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
                  .InstructionBaseAddressModifyEnable = true,
                  .InstructionBufferSize = 0xfffff,
                  .InstructionBuffersizeModifyEnable = true);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VF_STATISTICS,
                  .StatisticsEnable = true);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HS, .Enable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_TE, .TEEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
                  .ConstantBufferOffset = 0,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
                  .ConstantBufferOffset = 4,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
                  .ConstantBufferOffset = 8,
                  .ConstantBufferSize = 4);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_CHROMAKEY,
                  .ChromaKeyKillEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SBE_SWIZ);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);

   /* Hardcoded state: */
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER,
                  .SurfaceType = SURFTYPE_2D,
                  .SurfaceFormat = D16_UNORM,
                  .SurfaceBaseAddress = { NULL, 0 },
                  .HierarchicalDepthBufferEnable = 0);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_DEPTH_STENCIL,
                  .DepthTestEnable = false,
                  .DepthBufferWriteEnable = false);

   return VK_SUCCESS;
}
static void
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo, struct anv_reloc_list *list)
{
   struct drm_i915_gem_exec_object2 *obj;

   bo->index = cmd_buffer->bo_count;
   obj = &cmd_buffer->exec2_objects[bo->index];
   cmd_buffer->exec2_bos[bo->index] = bo;
   cmd_buffer->bo_count++;

   obj->handle = bo->gem_handle;
   obj->relocation_count = 0;
   obj->relocs_ptr = 0;
   obj->alignment = 0;
   obj->offset = bo->offset;
   obj->flags = 0;
   obj->rsvd1 = 0;
   obj->rsvd2 = 0;

   if (list) {
      obj->relocation_count = list->num_relocs;
      obj->relocs_ptr = (uintptr_t) list->relocs;
   }
}
static void
anv_cmd_buffer_add_validate_bos(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_reloc_list *list)
{
   struct anv_bo *bo, *batch_bo;

   batch_bo = &cmd_buffer->batch.bo;
   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      /* Skip any relocations targeting the batch bo. We need to make sure
       * it's the last in the list so we'll add it manually later.
       */
      if (bo == batch_bo)
         continue;

      if (bo->index < cmd_buffer->bo_count && cmd_buffer->exec2_bos[bo->index] == bo)
         continue;

      anv_cmd_buffer_add_bo(cmd_buffer, bo, NULL);
   }
}
static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
    * struct drm_i915_gem_exec_object2 against the bos current offset and if
    * all bos haven't moved it will skip relocation processing alltogether.
    * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
    * value of offset so we can set it either way.  For that to work we need
    * to make sure all relocs use the same presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}
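
/* vkEndCommandBuffer assembles the execbuf2 object list: the shared block
 * pool bos go in first (so their relocations ride along), then every bo
 * referenced by a relocation, and the batch bo itself last -- the kernel
 * takes the final object in the list as the batch.  With
 * I915_EXEC_HANDLE_LUT the reloc target_handle fields are indices into
 * this list rather than GEM handles, and I915_EXEC_NO_RELOC is only set
 * when every presumed offset still matched, letting the kernel skip
 * relocation processing entirely.
 */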
VkResult VKAPI vkEndCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_device *device = cmd_buffer->device;
   struct anv_batch *batch = &cmd_buffer->batch;

   anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_END);

   /* Round batch up to an even number of dwords. */
   if ((batch->next - batch->bo.map) & 4)
      anv_batch_emit(batch, GEN8_MI_NOOP);

   cmd_buffer->bo_count = 0;
   cmd_buffer->need_reloc = false;

   /* Lock for access to bo->index. */
   pthread_mutex_lock(&device->mutex);

   /* Add block pool bos first so we can add them with their relocs. */
   anv_cmd_buffer_add_bo(cmd_buffer, &device->surface_state_block_pool.bo,
                         &batch->surf_relocs);

   anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->surf_relocs);
   anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->cmd_relocs);
   anv_cmd_buffer_add_bo(cmd_buffer, &batch->bo, &batch->cmd_relocs);
   anv_cmd_buffer_process_relocs(cmd_buffer, &batch->surf_relocs);
   anv_cmd_buffer_process_relocs(cmd_buffer, &batch->cmd_relocs);

   cmd_buffer->execbuf.buffers_ptr = (uintptr_t) cmd_buffer->exec2_objects;
   cmd_buffer->execbuf.buffer_count = cmd_buffer->bo_count;
   cmd_buffer->execbuf.batch_start_offset = 0;
   cmd_buffer->execbuf.batch_len = batch->next - batch->bo.map;
   cmd_buffer->execbuf.cliprects_ptr = 0;
   cmd_buffer->execbuf.num_cliprects = 0;
   cmd_buffer->execbuf.DR1 = 0;
   cmd_buffer->execbuf.DR4 = 0;

   cmd_buffer->execbuf.flags = I915_EXEC_HANDLE_LUT;
   if (!cmd_buffer->need_reloc)
      cmd_buffer->execbuf.flags |= I915_EXEC_NO_RELOC;
   cmd_buffer->execbuf.flags |= I915_EXEC_RENDER;
   cmd_buffer->execbuf.rsvd1 = device->context_id;
   cmd_buffer->execbuf.rsvd2 = 0;

   pthread_mutex_unlock(&device->mutex);

   return VK_SUCCESS;
}
VkResult VKAPI vkResetCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_batch_reset(&cmd_buffer->batch);

   return VK_SUCCESS;
}

// Command buffer building functions

void VKAPI vkCmdBindPipeline(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   cmd_buffer->pipeline = (struct anv_pipeline *) _pipeline;
   cmd_buffer->dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
}
void VKAPI vkCmdBindDynamicStateObject(
    VkCmdBuffer                                 cmdBuffer,
    VkStateBindPoint                            stateBindPoint,
    VkDynamicStateObject                        dynamicState)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_dynamic_vp_state *vp_state;

   switch (stateBindPoint) {
   case VK_STATE_BIND_POINT_VIEWPORT:
      vp_state = (struct anv_dynamic_vp_state *) dynamicState;
      /* We emit state immediately, but set cmd_buffer->vp_state to indicate
       * that vp state has been set in this command buffer. */
      cmd_buffer->vp_state = vp_state;
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
                     .ScissorRectPointer = vp_state->scissor.offset);
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
                     .CCViewportPointer = vp_state->cc_vp.offset);
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
                     .SFClipViewportPointer = vp_state->sf_clip_vp.offset);
      break;
   case VK_STATE_BIND_POINT_RASTER:
      cmd_buffer->rs_state = (struct anv_dynamic_rs_state *) dynamicState;
      cmd_buffer->dirty |= ANV_CMD_BUFFER_RS_DIRTY;
      break;
   case VK_STATE_BIND_POINT_COLOR_BLEND:
   case VK_STATE_BIND_POINT_DEPTH_STENCIL:
      break;
   default:
      break;
   }
}
void VKAPI vkCmdBindDescriptorSets(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    firstSet,
    uint32_t                                    setCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
   struct anv_bindings *bindings = cmd_buffer->bindings;

   uint32_t offset = 0;
   for (uint32_t i = 0; i < setCount; i++) {
      struct anv_descriptor_set *set =
         (struct anv_descriptor_set *) pDescriptorSets[i];
      struct anv_descriptor_set_layout *set_layout = layout->set[firstSet + i].layout;

      for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
         uint32_t *surface_to_desc = set_layout->stage[s].surface_start;
         uint32_t *sampler_to_desc = set_layout->stage[s].sampler_start;
         uint32_t bias = s == VK_SHADER_STAGE_FRAGMENT ? MAX_RTS : 0;
         uint32_t start;

         start = bias + layout->set[firstSet + i].surface_start[s];
         for (uint32_t b = 0; b < set_layout->stage[s].surface_count; b++) {
            struct anv_surface_view *view = set->descriptors[surface_to_desc[b]].view;

            bindings->descriptors[s].surfaces[start + b] =
               view->surface_state.offset;
            bindings->descriptors[s].relocs[start + b].bo = view->bo;
            bindings->descriptors[s].relocs[start + b].offset = view->offset;
         }

         start = layout->set[firstSet + i].sampler_start[s];
         for (uint32_t b = 0; b < set_layout->stage[s].sampler_count; b++) {
            struct anv_sampler *sampler = set->descriptors[sampler_to_desc[b]].sampler;

            memcpy(&bindings->descriptors[s].samplers[start + b],
                   sampler->state, sizeof(sampler->state));
         }
      }

      offset += layout->set[firstSet + i].layout->num_dynamic_buffers;
   }

   cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
}

void VKAPI vkCmdBindIndexBuffer(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT8] = INDEX_BYTE,
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_INDEX_BUFFER,
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = 0,
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);
}
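
/* Illustrative only: binding a hypothetical `indexBuffer` at byte offset
 * 1024 with 16-bit indices, which emits 3DSTATE_INDEX_BUFFER with
 * IndexFormat == INDEX_WORD and the start address biased by the offset:
 *
 *    vkCmdBindIndexBuffer(cmdBuffer, indexBuffer, 1024,
 *                         VK_INDEX_TYPE_UINT16);
 */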

void VKAPI vkCmdBindVertexBuffers(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    startBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_bindings *bindings = cmd_buffer->bindings;

   /* We have to defer setting up vertex buffer since we need the buffer
    * stride from the pipeline. */
   for (uint32_t i = 0; i < bindingCount; i++) {
      bindings->vb[startBinding + i].buffer = (struct anv_buffer *) pBuffers[i];
      bindings->vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->vb_dirty |= 1 << (startBinding + i);
   }
}
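
/* Note (illustrative): vb_dirty is a bitmask over binding slots, so binding
 * two buffers at startBinding == 1 sets bits 1 and 2. The actual
 * 3DSTATE_VERTEX_BUFFERS packet is only emitted later, by
 * anv_cmd_buffer_flush_state(), once the pipeline's binding strides are
 * known. */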

static void
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
   struct anv_bindings *bindings = cmd_buffer->bindings;
   uint32_t layers = cmd_buffer->framebuffer->layers;

   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      uint32_t bias;

      if (s == VK_SHADER_STAGE_FRAGMENT) {
         bias = MAX_RTS;
         layers = cmd_buffer->framebuffer->layers;
      } else {
         bias = 0;
         layers = 0;
      }

      /* This is a little awkward: layout can be NULL but we still have to
       * allocate and set a binding table for the PS stage for render
       * targets. */
      uint32_t surface_count = layout ? layout->stage[s].surface_count : 0;

      if (layers + surface_count > 0) {
         struct anv_state state;
         uint32_t size;

         size = (layers + surface_count) * sizeof(uint32_t);
         state = anv_state_stream_alloc(&cmd_buffer->surface_state_stream, size, 32);
         memcpy(state.map, bindings->descriptors[s].surfaces, size);

         for (uint32_t i = 0; i < layers; i++)
            anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
                               bindings->descriptors[s].surfaces[i] + 8 * sizeof(int32_t),
                               bindings->descriptors[s].relocs[i].bo,
                               bindings->descriptors[s].relocs[i].offset);

         for (uint32_t i = 0; i < surface_count; i++)
            anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
                               bindings->descriptors[s].surfaces[bias + i] + 8 * sizeof(int32_t),
                               bindings->descriptors[s].relocs[bias + i].bo,
                               bindings->descriptors[s].relocs[bias + i].offset);

         static const uint32_t binding_table_opcodes[] = {
            [VK_SHADER_STAGE_VERTEX] = 38,
            [VK_SHADER_STAGE_TESS_CONTROL] = 39,
            [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
            [VK_SHADER_STAGE_GEOMETRY] = 41,
            [VK_SHADER_STAGE_FRAGMENT] = 42,
            [VK_SHADER_STAGE_COMPUTE] = 0,
         };

         anv_batch_emit(&cmd_buffer->batch,
                        GEN8_3DSTATE_BINDING_TABLE_POINTERS_VS,
                        ._3DCommandSubOpcode = binding_table_opcodes[s],
                        .PointertoVSBindingTable = state.offset);
      }

      if (layout && layout->stage[s].sampler_count > 0) {
         struct anv_state state;
         uint32_t size;

         size = layout->stage[s].sampler_count * 16;
         state = anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream, size, 32);
         memcpy(state.map, bindings->descriptors[s].samplers, size);

         static const uint32_t sampler_state_opcodes[] = {
            [VK_SHADER_STAGE_VERTEX] = 43,
            [VK_SHADER_STAGE_TESS_CONTROL] = 44, /* HS */
            [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
            [VK_SHADER_STAGE_GEOMETRY] = 46,
            [VK_SHADER_STAGE_FRAGMENT] = 47,
            [VK_SHADER_STAGE_COMPUTE] = 0,
         };

         anv_batch_emit(&cmd_buffer->batch,
                        GEN8_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                        ._3DCommandSubOpcode = sampler_state_opcodes[s],
                        .PointertoVSSamplerState = state.offset);
      }
   }
}
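
/* Design note (illustrative): the binding table and sampler pointer packets
 * for the different stages share one layout and differ only in the 3D
 * command sub-opcode, so the code above reuses the VS packet template and
 * patches ._3DCommandSubOpcode — e.g. emitting with
 * binding_table_opcodes[s] == 42 produces the _PS variant of the packet
 * instead of the _VS one. */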

static void
anv_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->pipeline;
   struct anv_bindings *bindings = cmd_buffer->bindings;
   const uint32_t num_buffers = __builtin_popcount(cmd_buffer->vb_dirty);
   const uint32_t num_dwords = 1 + num_buffers * 4;
   uint32_t *p;

   if (cmd_buffer->vb_dirty) {
      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GEN8_3DSTATE_VERTEX_BUFFERS);
      uint32_t vb, i = 0;
      for_each_bit(vb, cmd_buffer->vb_dirty) {
         struct anv_buffer *buffer = bindings->vb[vb].buffer;
         uint32_t offset = bindings->vb[vb].offset;

         struct GEN8_VERTEX_BUFFER_STATE state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = 0,
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset
         };

         GEN8_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   if (cmd_buffer->dirty & ANV_CMD_BUFFER_PIPELINE_DIRTY)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if (cmd_buffer->dirty & ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY)
      flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_RS_DIRTY))
      anv_batch_emit_merge(&cmd_buffer->batch,
                           cmd_buffer->rs_state->state_sf, pipeline->state_sf);

   cmd_buffer->vb_dirty = 0;
   cmd_buffer->dirty = 0;
}
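
/* Flush order sketch (illustrative): every draw funnels through this
 * function, so a typical first draw in a command buffer emits, in order,
 * 3DSTATE_VERTEX_BUFFERS (from vb_dirty), the pre-baked pipeline batch,
 * binding table and sampler pointers (from the descriptor-set dirty bit),
 * and finally 3DSTATE_SF merged from the pipeline's and rs_state's DWORDs. */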

void VKAPI vkCmdDraw(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    firstVertex,
    uint32_t                                    vertexCount,
    uint32_t                                    firstInstance,
    uint32_t                                    instanceCount)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .VertexAccessType = SEQUENTIAL,
                  .VertexCountPerInstance = vertexCount,
                  .StartVertexLocation = firstVertex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = 0);
}

void VKAPI vkCmdDrawIndexed(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    firstIndex,
    uint32_t                                    indexCount,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance,
    uint32_t                                    instanceCount)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .VertexAccessType = RANDOM,
                  .VertexCountPerInstance = indexCount,
                  .StartVertexLocation = firstIndex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = vertexOffset);
}
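
/* Note (illustrative): the differences from vkCmdDraw are VertexAccessType —
 * RANDOM makes the vertex fetcher go through the bound index buffer rather
 * than fetching sequentially — and BaseVertexLocation, which the hardware
 * adds to each fetched index before vertex fetch. */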

static void
anv_batch_lrm(struct anv_batch *batch,
              uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
}

static void
anv_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_IMM,
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}

/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440
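
/* Layout sketch (illustrative): the indirect draw paths below read a
 * four-DWORD record for non-indexed draws and a five-DWORD record for
 * indexed draws, loading each field into the matching 3DPRIM register:
 *
 *    struct draw_indirect         { u32 vertexCount, instanceCount,
 *                                   firstVertex, firstInstance; };
 *    struct draw_indexed_indirect { u32 indexCount, instanceCount,
 *                                   firstIndex;
 *                                   i32 vertexOffset;
 *                                   u32 firstInstance; };
 */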

void VKAPI vkCmdDrawIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    count,
    uint32_t                                    stride)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   anv_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = SEQUENTIAL);
}

void VKAPI vkCmdDrawIndexedIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    count,
    uint32_t                                    stride)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = RANDOM);
}
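
/* Note (illustrative): for the indexed path the record's fourth DWORD is
 * vertexOffset, so GEN7_3DPRIM_BASE_VERTEX is loaded from bo_offset + 12
 * and START_INSTANCE moves to bo_offset + 16, whereas the non-indexed path
 * forces BASE_VERTEX to zero with an immediate register load. */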

/* The following command buffer entry points are not yet implemented. */

void VKAPI vkCmdDispatch(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
}

void VKAPI vkCmdDispatchIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    buffer,
    VkDeviceSize                                offset)
{
}

void VKAPI vkCmdSetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipeEvent                                 pipeEvent)
{
}

void VKAPI vkCmdResetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipeEvent                                 pipeEvent)
{
}

void VKAPI vkCmdWaitEvents(
    VkCmdBuffer                                 cmdBuffer,
    VkWaitEvent                                 waitEvent,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    uint32_t                                    memBarrierCount,
    const void**                                ppMemBarriers)
{
}

void VKAPI vkCmdPipelineBarrier(
    VkCmdBuffer                                 cmdBuffer,
    VkWaitEvent                                 waitEvent,
    uint32_t                                    pipeEventCount,
    const VkPipeEvent*                          pPipeEvents,
    uint32_t                                    memBarrierCount,
    const void**                                ppMemBarriers)
{
}

static void
anv_batch_emit_ps_depth_count(struct anv_batch *batch,
                              struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN8_PIPE_CONTROL,
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .Address = { bo, offset });  /* FIXME: This is only lower 32 bits */
}

void VKAPI vkCmdBeginQuery(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    slot,
    VkQueryControlFlags                         flags)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo, slot * 16);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      break;
   }
}

void VKAPI vkCmdEndQuery(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    slot)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo, slot * 16 + 8);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      break;
   }
}
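
/* Occlusion query sketch (illustrative): each slot owns 16 bytes in the
 * pool BO. vkCmdBeginQuery writes the PS depth count at slot * 16 and
 * vkCmdEndQuery writes it again at slot * 16 + 8; the query result is the
 * difference between the two 64-bit counters. */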

void VKAPI vkCmdResetQueryPool(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount)
{
}

#define TIMESTAMP 0x44070

void VKAPI vkCmdWriteTimestamp(
    VkCmdBuffer                                 cmdBuffer,
    VkTimestampType                             timestampType,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
   struct anv_bo *bo = buffer->bo;

   switch (timestampType) {
   case VK_TIMESTAMP_TYPE_TOP:
      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { bo, buffer->offset + destOffset });
      break;
   case VK_TIMESTAMP_TYPE_BOTTOM:
      anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = /* FIXME: This is only lower 32 bits */
                        { bo, buffer->offset + destOffset });
      break;
   default:
      break;
   }
}
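
/* Note (illustrative): TOP stores the TIMESTAMP register (0x44070) with
 * MI_STORE_REGISTER_MEM, capturing the time at which the command streamer
 * parses the command, while BOTTOM uses a PIPE_CONTROL post-sync write,
 * which lands only after the preceding work has drained the pipeline. */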

void VKAPI vkCmdCopyQueryPoolResults(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
}

void VKAPI vkCmdInitAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    const uint32_t*                             pData)
{
}

void VKAPI vkCmdLoadAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    VkBuffer                                    srcBuffer,
    VkDeviceSize                                srcOffset)
{
}

void VKAPI vkCmdSaveAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset)
{
}

VkResult VKAPI vkCreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    VkFramebuffer*                              pFramebuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   framebuffer = anv_device_alloc(device, sizeof(*framebuffer), 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->color_attachment_count = pCreateInfo->colorAttachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->colorAttachmentCount; i++) {
      framebuffer->color_attachments[i] =
         (struct anv_surface_view *) pCreateInfo->pColorAttachments[i].view;
   }

   if (pCreateInfo->pDepthStencilAttachment) {
      framebuffer->depth_stencil =
         (struct anv_depth_stencil_view *) pCreateInfo->pDepthStencilAttachment->view;
   }

   framebuffer->sample_count = pCreateInfo->sampleCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   vkCreateDynamicViewportState((VkDevice) device,
      &(VkDynamicVpStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO,
         .viewportAndScissorCount = 2,
         .pViewports = (VkViewport[]) {
            {
               .originX = 0,
               .originY = 0,
               .width = pCreateInfo->width,
               .height = pCreateInfo->height,
               .minDepth = 0,
               .maxDepth = 1
            },
         },
         .pScissors = (VkRect[]) {
            { { 0, 0 },
              { pCreateInfo->width, pCreateInfo->height } },
         }
      },
      &framebuffer->vp_state);

   *pFramebuffer = (VkFramebuffer) framebuffer;

   return VK_SUCCESS;
}
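
/* Note (illustrative): creating a framebuffer also bakes a default dynamic
 * viewport/scissor state covering the full width x height, stored in
 * framebuffer->vp_state, presumably so a render pass can be recorded even
 * if the client never binds VK_STATE_BIND_POINT_VIEWPORT itself. */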

VkResult VKAPI vkCreateRenderPass(
    VkDevice                                    _device,
    const VkRenderPassCreateInfo*               pCreateInfo,
    VkRenderPass*                               pRenderPass)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_render_pass *pass;
   size_t size;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);

   size = sizeof(*pass) +
      pCreateInfo->layers * sizeof(struct anv_render_pass_layer);
   pass = anv_device_alloc(device, size, 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pass == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pass->render_area = pCreateInfo->renderArea;

   pass->num_layers = pCreateInfo->layers;

   pass->num_clear_layers = 0;
   for (uint32_t i = 0; i < pCreateInfo->layers; i++) {
      pass->layers[i].color_load_op = pCreateInfo->pColorLoadOps[i];
      pass->layers[i].clear_color = pCreateInfo->pColorLoadClearValues[i];
      if (pass->layers[i].color_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         pass->num_clear_layers++;
   }

   *pRenderPass = (VkRenderPass) pass;

   return VK_SUCCESS;
}

static void
anv_cmd_buffer_fill_render_targets(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *framebuffer = cmd_buffer->framebuffer;
   struct anv_bindings *bindings = cmd_buffer->bindings;

   for (uint32_t i = 0; i < framebuffer->color_attachment_count; i++) {
      struct anv_surface_view *view = framebuffer->color_attachments[i];

      bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].surfaces[i] =
         view->surface_state.offset;
      bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].relocs[i].bo = view->bo;
      bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].relocs[i].offset = view->offset;
   }

   cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
}
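
/* Note (illustrative): this runs at vkCmdBeginRenderPass, dropping the RT
 * surfaces into the first slots of the fragment stage's binding table and
 * marking descriptors dirty so the next draw's flush_descriptor_sets()
 * re-emits the binding table pointers. */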

void VKAPI vkCmdBeginRenderPass(
    VkCmdBuffer                                 cmdBuffer,
    const VkRenderPassBegin*                    pRenderPassBegin)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_render_pass *pass =
      (struct anv_render_pass *) pRenderPassBegin->renderPass;
   struct anv_framebuffer *framebuffer =
      (struct anv_framebuffer *) pRenderPassBegin->framebuffer;

   cmd_buffer->framebuffer = framebuffer;

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DRAWING_RECTANGLE,
                  .ClippedDrawingRectangleYMin = pass->render_area.offset.y,
                  .ClippedDrawingRectangleXMin = pass->render_area.offset.x,
                  .ClippedDrawingRectangleYMax =
                     pass->render_area.offset.y + pass->render_area.extent.height - 1,
                  .ClippedDrawingRectangleXMax =
                     pass->render_area.offset.x + pass->render_area.extent.width - 1,
                  .DrawingRectangleOriginY = 0,
                  .DrawingRectangleOriginX = 0);

   anv_cmd_buffer_fill_render_targets(cmd_buffer);

   anv_cmd_buffer_clear(cmd_buffer, pass);
}
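
/* Worked example (illustrative): for a render area at offset (0, 0) with a
 * 256 x 128 extent, the drawing rectangle above clips to X in [0, 255] and
 * Y in [0, 127] — the Max fields are inclusive, hence the "- 1". */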

void VKAPI vkCmdEndRenderPass(
    VkCmdBuffer                                 cmdBuffer,
    VkRenderPass                                renderPass)
{
   /* Emit a flushing pipe control at the end of a pass. This is kind of a
    * hack but it ensures that render targets always actually get written.
    * Eventually, we should do flushing based on image format transitions
    * or something of that nature.
    */
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
                  .PostSyncOperation = NoWrite,
                  .RenderTargetCacheFlushEnable = true,
                  .InstructionCacheInvalidateEnable = true,
                  .DepthCacheFlushEnable = true,
                  .VFCacheInvalidationEnable = true,
                  .TextureCacheInvalidationEnable = true,
                  .CommandStreamerStallEnable = true);
}