/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
static int
anv_env_get_int(const char *name)
{
   const char *val = getenv(name);

   if (!val)
      return 0;

   return strtol(val, NULL, 0);
}
static VkResult
fill_physical_device(struct anv_physical_device *device,
                     struct anv_instance *instance,
                     const char *path)
{
   int fd;

   fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_UNAVAILABLE);

   device->instance = instance;

   device->chipset_id = anv_env_get_int("INTEL_DEVID_OVERRIDE");
   device->no_hw = false;
   if (device->chipset_id) {
      /* INTEL_DEVID_OVERRIDE implies INTEL_NO_HW. */
      device->no_hw = true;
   } else {
      device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   }
   if (!device->chipset_id)
      goto fail;

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id, -1);
   if (!device->info)
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
      goto fail;

   close(fd);

   return VK_SUCCESS;

 fail:
   close(fd);

   return vk_error(VK_ERROR_UNAVAILABLE);
}
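/* Note on the parameter checks above (an inference from how this file uses
 * the kernel interface, not from i915 documentation): EXECBUF2 and
 * WAIT_TIMEOUT back the anv_gem_execbuffer()/anv_gem_wait() calls used
 * throughout this file, and LLC is what lets anv_MapMemory() hand out
 * coherent CPU mappings without explicit clflushing (see the comment in
 * anv_FlushMappedMemory() below). A kernel missing any of them makes the
 * device report VK_ERROR_UNAVAILABLE rather than limp along.
 */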
static void *default_alloc(
    void*                                       pUserData,
    size_t                                      size,
    size_t                                      alignment,
    VkSystemAllocType                           allocType)
{
   return malloc(size);
}

static void default_free(
    void*                                       pUserData,
    void*                                       pMem)
{
   free(pMem);
}

static const VkAllocCallbacks default_alloc_callbacks = {
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};
VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;
   }
   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;

   instance->physicalDeviceCount = 0;
   result = fill_physical_device(&instance->physicalDevice,
                                 instance, "/dev/dri/renderD128");
   if (result == VK_SUCCESS)
      instance->physicalDeviceCount++;

   *pInstance = (VkInstance) instance;

   return VK_SUCCESS;
}
VkResult anv_DestroyInstance(
    VkInstance                                  _instance)
{
   struct anv_instance *instance = (struct anv_instance *) _instance;

   instance->pfnFree(instance->pAllocUserData, instance);

   return VK_SUCCESS;
}
VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   struct anv_instance *instance = (struct anv_instance *) _instance;

   if (*pPhysicalDeviceCount >= 1)
      pPhysicalDevices[0] = (VkPhysicalDevice) &instance->physicalDevice;
   *pPhysicalDeviceCount = instance->physicalDeviceCount;

   return VK_SUCCESS;
}
VkResult anv_GetPhysicalDeviceInfo(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceInfoType                    infoType,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   struct anv_physical_device *device = (struct anv_physical_device *) physicalDevice;
   VkPhysicalDeviceProperties *properties;
   VkPhysicalDevicePerformance *performance;
   VkPhysicalDeviceQueueProperties *queue_properties;
   VkPhysicalDeviceMemoryProperties *memory_properties;
   uint64_t ns_per_tick = 80;

   switch (infoType) {
   case VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES:
      properties = pData;

      *pDataSize = sizeof(*properties);
      if (pData == NULL)
         return VK_SUCCESS;

      properties->apiVersion = 1;
      properties->driverVersion = 1;
      properties->vendorId = 0x8086;
      properties->deviceId = device->chipset_id;
      properties->deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
      strcpy(properties->deviceName, device->name);
      properties->maxInlineMemoryUpdateSize = 0;
      properties->maxBoundDescriptorSets = MAX_SETS;
      properties->maxThreadGroupSize = 512;
      properties->timestampFrequency = 1000 * 1000 * 1000 / ns_per_tick;
      properties->multiColorAttachmentClears = true;
      properties->maxDescriptorSets = 8;
      properties->maxViewports = 16;
      properties->maxColorAttachments = 8;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE:
      performance = pData;

      *pDataSize = sizeof(*performance);
      if (pData == NULL)
         return VK_SUCCESS;

      performance->maxDeviceClock = 1.0;
      performance->aluPerClock = 1.0;
      performance->texPerClock = 1.0;
      performance->primsPerClock = 1.0;
      performance->pixelsPerClock = 1.0;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES:
      queue_properties = pData;

      *pDataSize = sizeof(*queue_properties);
      if (pData == NULL)
         return VK_SUCCESS;

      queue_properties->queueFlags = 0;
      queue_properties->queueCount = 1;
      queue_properties->maxAtomicCounters = 0;
      queue_properties->supportsTimestamps = true;
      queue_properties->maxMemReferences = 256;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES:
      memory_properties = pData;

      *pDataSize = sizeof(*memory_properties);
      if (pData == NULL)
         return VK_SUCCESS;

      memory_properties->supportsMigration = false;
      memory_properties->supportsPinning = false;
      return VK_SUCCESS;

   default:
      return VK_UNSUPPORTED;
   }
}
void * vkGetProcAddr(
    VkPhysicalDevice                            physicalDevice,
    const char*                                 pName)
{
   return anv_lookup_entrypoint(pName);
}
static void
parse_debug_flags(struct anv_device *device)
{
   const char *debug, *p, *end;

   debug = getenv("INTEL_DEBUG");
   device->dump_aub = false;
   if (debug) {
      for (p = debug; *p; p = end + 1) {
         end = strchrnul(p, ',');
         if (end - p == 3 && memcmp(p, "aub", 3) == 0)
            device->dump_aub = true;
         if (end - p == 5 && memcmp(p, "no_hw", 5) == 0)
            device->no_hw = true;
         if (*end == '\0')
            break;
      }
   }
}
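/* Usage sketch (hypothetical invocation): the variable is a comma-separated
 * flag list, so
 *
 *    INTEL_DEBUG=aub,no_hw ./app
 *
 * records an AUB trace of every submitted command buffer while skipping the
 * actual execbuffer ioctls, matching the two flags parsed above.
 */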
VkResult anv_CreateDevice(
    VkPhysicalDevice                            _physicalDevice,
    const VkDeviceCreateInfo*                   pCreateInfo,
    VkDevice*                                   pDevice)
{
   struct anv_physical_device *physicalDevice =
      (struct anv_physical_device *) _physicalDevice;
   struct anv_instance *instance = physicalDevice->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   device = instance->pfnAlloc(instance->pAllocUserData,
                               sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->no_hw = physicalDevice->no_hw;
   parse_debug_flags(device);

   device->instance = physicalDevice->instance;
   device->fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail;

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 2048);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   device->compiler = anv_compiler_create(device->fd);
   device->aub_writer = NULL;

   device->info = *physicalDevice->info;

   pthread_mutex_init(&device->mutex, NULL);

   anv_device_init_meta(device);

   *pDevice = (VkDevice) device;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_UNAVAILABLE);
}
VkResult anv_DestroyDevice(
    VkDevice                                    _device)
{
   struct anv_device *device = (struct anv_device *) _device;

   anv_compiler_destroy(device->compiler);

   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);

   close(device->fd);

   if (device->aub_writer)
      anv_aub_writer_destroy(device->aub_writer);

   anv_device_free(device, device);

   return VK_SUCCESS;
}
VkResult anv_GetGlobalExtensionInfo(
    VkExtensionInfoType                         infoType,
    uint32_t                                    extensionIndex,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   uint32_t *count;

   switch (infoType) {
   case VK_EXTENSION_INFO_TYPE_COUNT:
      count = pData;
      assert(*pDataSize == 4);
      *count = 0;
      return VK_SUCCESS;

   case VK_EXTENSION_INFO_TYPE_PROPERTIES:
      return vk_error(VK_ERROR_INVALID_EXTENSION);

   default:
      return VK_UNSUPPORTED;
   }
}
VkResult anv_GetPhysicalDeviceExtensionInfo(
    VkPhysicalDevice                            physicalDevice,
    VkExtensionInfoType                         infoType,
    uint32_t                                    extensionIndex,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   uint32_t *count;

   switch (infoType) {
   case VK_EXTENSION_INFO_TYPE_COUNT:
      count = pData;
      assert(*pDataSize == 4);
      *count = 0;
      return VK_SUCCESS;

   case VK_EXTENSION_INFO_TYPE_PROPERTIES:
      return vk_error(VK_ERROR_INVALID_EXTENSION);

   default:
      return VK_UNSUPPORTED;
   }
}
VkResult anv_EnumerateLayers(
    VkPhysicalDevice                            physicalDevice,
    size_t                                      maxStringSize,
    size_t*                                     pLayerCount,
    char* const*                                pOutLayers,
    void*                                       pReserved)
{
   *pLayerCount = 0;

   return VK_SUCCESS;
}
VkResult anv_GetDeviceQueue(
    VkDevice                                    _device,
    uint32_t                                    queueNodeIndex,
    uint32_t                                    queueIndex,
    VkQueue*                                    pQueue)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_queue *queue;

   /* FIXME: Should allocate these at device create time. */

   queue = anv_device_alloc(device, sizeof(*queue), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (queue == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   queue->device = device;
   queue->pool = &device->surface_state_pool;

   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
   *(uint32_t *)queue->completed_serial.map = 0;
   queue->next_serial = 1;

   *pQueue = (VkQueue) queue;

   return VK_SUCCESS;
}
static const uint32_t BATCH_SIZE = 8192;

VkResult
anv_batch_init(struct anv_batch *batch, struct anv_device *device)
{
   VkResult result;

   result = anv_bo_init_new(&batch->bo, device, BATCH_SIZE);
   if (result != VK_SUCCESS)
      return result;

   batch->bo.map =
      anv_gem_mmap(device, batch->bo.gem_handle, 0, BATCH_SIZE);
   if (batch->bo.map == NULL) {
      anv_gem_close(device, batch->bo.gem_handle);
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
   }

   batch->cmd_relocs.num_relocs = 0;
   batch->surf_relocs.num_relocs = 0;
   batch->next = batch->bo.map;

   return VK_SUCCESS;
}
void
anv_batch_finish(struct anv_batch *batch, struct anv_device *device)
{
   anv_gem_munmap(batch->bo.map, BATCH_SIZE);
   anv_gem_close(device, batch->bo.gem_handle);
}
void
anv_batch_reset(struct anv_batch *batch)
{
   batch->next = batch->bo.map;
   batch->cmd_relocs.num_relocs = 0;
   batch->surf_relocs.num_relocs = 0;
}
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   void *p = batch->next;

   batch->next += num_dwords * 4;

   return p;
}
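/* Note: there is no bounds check against BATCH_SIZE here; callers are
 * trusted to stay within the 8192-byte batch bo. The anv_batch_emit()
 * macro used throughout this file presumably reserves dwords through this
 * function and packs the named GEN8_* command into the returned pointer.
 */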
static void
anv_reloc_list_append(struct anv_reloc_list *list,
                      struct anv_reloc_list *other, uint32_t offset)
{
   uint32_t i, count;

   count = list->num_relocs;
   memcpy(&list->relocs[count], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[count], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));
   for (i = 0; i < other->num_relocs; i++)
      list->relocs[i + count].offset += offset;

   count += other->num_relocs;
   list->num_relocs = count;
}
static uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
                   uint32_t offset,
                   struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   assert(list->num_relocs < ANV_BATCH_MAX_RELOCS);

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;

   return target_bo->offset + delta;
}
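/* The value returned above is the address the batch will carry for the
 * target bo: presumed_offset records the same guess in the kernel's
 * relocation entry, so if the bo hasn't moved by execbuf time the kernel
 * can skip patching the batch entirely (see the I915_EXEC_NO_RELOC handling
 * in anv_cmd_buffer_process_relocs() further down).
 */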
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->bo.map;
   memcpy(batch->next, other->bo.map, size);

   offset = batch->next - batch->bo.map;
   anv_reloc_list_append(&batch->cmd_relocs, &other->cmd_relocs, offset);
   anv_reloc_list_append(&batch->surf_relocs, &other->surf_relocs, offset);

   batch->next += size;
}
uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(&batch->cmd_relocs,
                             location - batch->bo.map, bo, delta);
}
VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    cmdBufferCount,
    const VkCmdBuffer*                          pCmdBuffers,
    VkFence                                     _fence)
{
   struct anv_queue *queue = (struct anv_queue *) _queue;
   struct anv_device *device = queue->device;
   struct anv_fence *fence = (struct anv_fence *) _fence;
   int ret;

   for (uint32_t i = 0; i < cmdBufferCount; i++) {
      struct anv_cmd_buffer *cmd_buffer =
         (struct anv_cmd_buffer *) pCmdBuffers[i];

      if (device->dump_aub)
         anv_cmd_buffer_dump(cmd_buffer);

      if (!device->no_hw) {
         ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf);
         if (ret != 0)
            return vk_error(VK_ERROR_UNKNOWN);

         if (fence) {
            ret = anv_gem_execbuffer(device, &fence->execbuf);
            if (ret != 0)
               return vk_error(VK_ERROR_UNKNOWN);
         }

         for (uint32_t i = 0; i < cmd_buffer->bo_count; i++)
            cmd_buffer->exec2_bos[i]->offset = cmd_buffer->exec2_objects[i].offset;
      } else {
         *(uint32_t *)queue->completed_serial.map = cmd_buffer->serial;
      }
   }

   return VK_SUCCESS;
}
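/* Note: the exec2_objects[i].offset copy-back above is what keeps presumed
 * offsets fresh: the kernel writes each bo's actual GPU address into the
 * exec object on return, and storing it in anv_bo::offset lets the next
 * submission qualify for the I915_EXEC_NO_RELOC fast path.
 */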
VkResult anv_QueueAddMemReferences(
    VkQueue                                     queue,
    uint32_t                                    count,
    const VkDeviceMemory*                       pMems)
{
   return VK_SUCCESS;
}

VkResult anv_QueueRemoveMemReferences(
    VkQueue                                     queue,
    uint32_t                                    count,
    const VkDeviceMemory*                       pMems)
{
   return VK_SUCCESS;
}
VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   struct anv_queue *queue = (struct anv_queue *) _queue;

   return vkDeviceWaitIdle((VkDevice) queue->device);
}
VkResult anv_DeviceWaitIdle(
    VkDevice                                    _device)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
   bo = &device->dynamic_state_pool.block_pool->bo;
   batch.next = state.map;
   anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN8_MI_NOOP);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (!device->no_hw) {
      ret = anv_gem_execbuffer(device, &execbuf);
      if (ret != 0) {
         result = vk_error(VK_ERROR_UNKNOWN);
         goto fail;
      }

      timeout = INT64_MAX;
      ret = anv_gem_wait(device, bo->gem_handle, &timeout);
      if (ret != 0) {
         result = vk_error(VK_ERROR_UNKNOWN);
         goto fail;
      }
   }

   anv_state_pool_free(&device->dynamic_state_pool, state);

   return VK_SUCCESS;

 fail:
   anv_state_pool_free(&device->dynamic_state_pool, state);

   return result;
}
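/* The wait-for-idle trick above: submitting a batch that is nothing but
 * MI_BATCH_BUFFER_END on the same hardware context serializes behind all
 * previously submitted work, so a blocking anv_gem_wait() on its bo
 * returns only once the GPU has drained the queue.
 */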
void *
anv_device_alloc(struct anv_device *            device,
                 size_t                         size,
                 size_t                         alignment,
                 VkSystemAllocType              allocType)
{
   return device->instance->pfnAlloc(device->instance->pAllocUserData,
                                     size, alignment, allocType);
}

void
anv_device_free(struct anv_device *             device,
                void *                          mem)
{
   return device->instance->pfnFree(device->instance->pAllocUserData,
                                    mem);
}
VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;

   return VK_SUCCESS;
}
VkResult anv_AllocMemory(
    VkDevice                                    _device,
    const VkMemoryAllocInfo*                    pAllocInfo,
    VkDeviceMemory*                             pMem)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = (VkDeviceMemory) mem;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, mem);

   return result;
}
VkResult anv_FreeMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);

   return VK_SUCCESS;
}
VkResult anv_SetMemoryPriority(
    VkDevice                                    device,
    VkDeviceMemory                              mem,
    VkMemoryPriority                            priority)
{
   return VK_SUCCESS;
}
VkResult anv_MapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}
VkResult anv_UnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   anv_gem_munmap(mem->map, mem->map_size);

   return VK_SUCCESS;
}
VkResult anv_FlushMappedMemory(
    VkDevice                                    device,
    VkDeviceMemory                              mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}
VkResult anv_PinSystemMemory(
    VkDevice                                    device,
    const void*                                 pSysMem,
    size_t                                      memSize,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}

VkResult anv_GetMultiDeviceCompatibility(
    VkPhysicalDevice                            physicalDevice0,
    VkPhysicalDevice                            physicalDevice1,
    VkPhysicalDeviceCompatibilityInfo*          pInfo)
{
   return VK_UNSUPPORTED;
}

VkResult anv_OpenSharedMemory(
    VkDevice                                    device,
    const VkMemoryOpenInfo*                     pOpenInfo,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}

VkResult anv_OpenSharedSemaphore(
    VkDevice                                    device,
    const VkSemaphoreOpenInfo*                  pOpenInfo,
    VkSemaphore*                                pSemaphore)
{
   return VK_UNSUPPORTED;
}

VkResult anv_OpenPeerMemory(
    VkDevice                                    device,
    const VkPeerMemoryOpenInfo*                 pOpenInfo,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}

VkResult anv_OpenPeerImage(
    VkDevice                                    device,
    const VkPeerImageOpenInfo*                  pOpenInfo,
    VkImage*                                    pImage,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}
static VkResult
anv_instance_destructor(struct anv_device *     device,
                        VkObject                object)
{
   return vkDestroyInstance(object);
}

static VkResult
anv_noop_destructor(struct anv_device *         device,
                    VkObject                    object)
{
   return VK_SUCCESS;
}

static VkResult
anv_device_destructor(struct anv_device *       device,
                      VkObject                  object)
{
   return vkDestroyDevice(object);
}

static VkResult
anv_cmd_buffer_destructor(struct anv_device *   device,
                          VkObject              object)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) object;

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_batch_finish(&cmd_buffer->batch, device);
   anv_device_free(device, cmd_buffer->exec2_objects);
   anv_device_free(device, cmd_buffer->exec2_bos);
   anv_device_free(device, cmd_buffer);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_destructor(struct anv_device *     device,
                        VkObject                object)
{
   struct anv_pipeline *pipeline = (struct anv_pipeline *) object;

   return anv_pipeline_destroy(pipeline);
}

static VkResult
anv_free_destructor(struct anv_device *         device,
                    VkObject                    object)
{
   anv_device_free(device, (void *) object);

   return VK_SUCCESS;
}

static VkResult
anv_fence_destructor(struct anv_device *        device,
                     VkObject                   object)
{
   struct anv_fence *fence = (struct anv_fence *) object;

   anv_gem_munmap(fence->bo.map, fence->bo.size);
   anv_gem_close(device, fence->bo.gem_handle);
   anv_device_free(device, fence);

   return VK_SUCCESS;
}

static VkResult
anv_query_pool_destructor(struct anv_device *   device,
                          VkObject              object)
{
   struct anv_query_pool *pool = (struct anv_query_pool *) object;

   anv_gem_munmap(pool->bo.map, pool->bo.size);
   anv_gem_close(device, pool->bo.gem_handle);
   anv_device_free(device, pool);

   return VK_SUCCESS;
}

static VkResult (*anv_object_destructors[])(struct anv_device *device,
                                            VkObject object) = {
   [VK_OBJECT_TYPE_INSTANCE] = anv_instance_destructor,
   [VK_OBJECT_TYPE_PHYSICAL_DEVICE] = anv_noop_destructor,
   [VK_OBJECT_TYPE_DEVICE] = anv_device_destructor,
   [VK_OBJECT_TYPE_QUEUE] = anv_noop_destructor,
   [VK_OBJECT_TYPE_COMMAND_BUFFER] = anv_cmd_buffer_destructor,
   [VK_OBJECT_TYPE_PIPELINE] = anv_pipeline_destructor,
   [VK_OBJECT_TYPE_SHADER] = anv_free_destructor,
   [VK_OBJECT_TYPE_BUFFER] = anv_free_destructor,
   [VK_OBJECT_TYPE_IMAGE] = anv_free_destructor,
   [VK_OBJECT_TYPE_RENDER_PASS] = anv_free_destructor,
   [VK_OBJECT_TYPE_FENCE] = anv_fence_destructor,
   [VK_OBJECT_TYPE_QUERY_POOL] = anv_query_pool_destructor
};
VkResult anv_DestroyObject(
    VkDevice                                    _device,
    VkObjectType                                objType,
    VkObject                                    object)
{
   struct anv_device *device = (struct anv_device *) _device;

   assert(objType < ARRAY_SIZE(anv_object_destructors) &&
          anv_object_destructors[objType] != NULL);

   return anv_object_destructors[objType](device, object);
}
static void
fill_memory_requirements(
    VkObjectType                                objType,
    VkObject                                    object,
    VkMemoryRequirements*                       memory_requirements)
{
   struct anv_buffer *buffer;
   struct anv_image *image;

   memory_requirements->memPropsAllowed =
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT |
      /* VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT | */
      VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT |
      VK_MEMORY_PROPERTY_PREFER_HOST_LOCAL |
      VK_MEMORY_PROPERTY_SHAREABLE_BIT;

   memory_requirements->memPropsRequired = 0;

   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
      buffer = (struct anv_buffer *) object;
      memory_requirements->size = buffer->size;
      memory_requirements->alignment = 16;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      image = (struct anv_image *) object;
      memory_requirements->size = image->size;
      memory_requirements->alignment = image->alignment;
      break;
   default:
      memory_requirements->size = 0;
      break;
   }
}
static uint32_t
get_allocation_count(VkObjectType objType)
{
   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
   case VK_OBJECT_TYPE_IMAGE:
      return 1;
   default:
      return 0;
   }
}
VkResult anv_GetObjectInfo(
    VkDevice                                    _device,
    VkObjectType                                objType,
    VkObject                                    object,
    VkObjectInfoType                            infoType,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   VkMemoryRequirements memory_requirements;
   uint32_t *count;

   switch (infoType) {
   case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
      *pDataSize = sizeof(memory_requirements);
      if (pData == NULL)
         return VK_SUCCESS;

      fill_memory_requirements(objType, object, pData);
      return VK_SUCCESS;

   case VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
      *pDataSize = sizeof(count);
      if (pData == NULL)
         return VK_SUCCESS;

      count = pData;
      *count = get_allocation_count(objType);
      return VK_SUCCESS;

   default:
      return VK_UNSUPPORTED;
   }
}
VkResult anv_QueueBindObjectMemory(
    VkQueue                                     queue,
    VkObjectType                                objType,
    VkObject                                    object,
    uint32_t                                    allocationIdx,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                memOffset)
{
   struct anv_buffer *buffer;
   struct anv_image *image;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
      buffer = (struct anv_buffer *) object;
      buffer->bo = &mem->bo;
      buffer->offset = memOffset;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      image = (struct anv_image *) object;
      image->bo = &mem->bo;
      image->offset = memOffset;
      break;
   default:
      break;
   }

   return VK_SUCCESS;
}
VkResult anv_QueueBindObjectMemoryRange(
    VkQueue                                     queue,
    VkObjectType                                objType,
    VkObject                                    object,
    uint32_t                                    allocationIdx,
    VkDeviceSize                                rangeOffset,
    VkDeviceSize                                rangeSize,
    VkDeviceMemory                              mem,
    VkDeviceSize                                memOffset)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindImageMemoryRange(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    allocationIdx,
    const VkImageMemoryBindInfo*                pBindInfo,
    VkDeviceMemory                              mem,
    VkDeviceSize                                memOffset)
{
   stub_return(VK_UNSUPPORTED);
}
VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    VkFence*                                    pFence)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   const uint32_t fence_size = 128;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = anv_device_alloc(device, sizeof(*fence), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&fence->bo, device, fence_size);
   if (result != VK_SUCCESS)
      goto fail;

   fence->bo.map =
      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
   batch.next = fence->bo.map;
   anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN8_MI_NOOP);

   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = 0;
   fence->execbuf.batch_len = batch.next - fence->bo.map;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   *pFence = (VkFence) fence;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, fence);

   return result;
}
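/* Fences here are implemented as a second, trivial execbuf: the 128-byte
 * bo holds just MI_BATCH_BUFFER_END, and anv_QueueSubmit() kicks it off
 * right after the real command buffers. Waiting on the fence (see
 * anv_GetFenceStatus()/anv_WaitForFences()) is then a gem_wait on this bo.
 */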
VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    VkFence*                                    pFences)
{
   struct anv_fence **fences = (struct anv_fence **) pFences;

   for (uint32_t i = 0; i < fenceCount; i++)
      fences[i]->ready = false;

   return VK_SUCCESS;
}
VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_fence *fence = (struct anv_fence *) _fence;
   int64_t t = 0;
   int ret;

   if (fence->ready)
      return VK_SUCCESS;

   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
   if (ret == 0) {
      fence->ready = true;
      return VK_SUCCESS;
   }

   return VK_NOT_READY;
}
VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    bool32_t                                    waitAll,
    uint64_t                                    timeout)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_fence **fences = (struct anv_fence **) pFences;
   int64_t t = timeout;
   int ret;

   /* FIXME: handle !waitAll */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ret = anv_gem_wait(device, fences[i]->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME)
         return VK_TIMEOUT;
      else if (ret == -1)
         return vk_error(VK_ERROR_UNKNOWN);
   }

   return VK_SUCCESS;
}
// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    VkSemaphore*                                pSemaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueSignalSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueWaitSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}
VkResult anv_CreateEvent(
    VkDevice                                    device,
    const VkEventCreateInfo*                    pCreateInfo,
    VkEvent*                                    pEvent)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_GetEventStatus(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_SetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_ResetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}
VkResult anv_CreateQueryPool(
    VkDevice                                    _device,
    const VkQueryPoolCreateInfo*                pCreateInfo,
    VkQueryPool*                                pQueryPool)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_query_pool *pool;
   VkResult result;
   size_t size;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);

   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      return VK_UNSUPPORTED;
   default:
      break;
   }

   pool = anv_device_alloc(device, sizeof(*pool), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->type = pCreateInfo->queryType;
   size = pCreateInfo->slots * sizeof(struct anv_query_pool_slot);
   result = anv_bo_init_new(&pool->bo, device, size);
   if (result != VK_SUCCESS)
      goto fail;

   pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0, size);

   *pQueryPool = (VkQueryPool) pool;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, pool);

   return result;
}
VkResult anv_GetQueryPoolResults(
    VkDevice                                    _device,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount,
    size_t*                                     pDataSize,
    void*                                       pData,
    VkQueryResultFlags                          flags)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
   struct anv_query_pool_slot *slot = pool->bo.map;
   int64_t timeout = INT64_MAX;
   uint32_t *dst32 = pData;
   uint64_t *dst64 = pData;
   uint64_t result;
   int ret;

   if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
      /* Where is the availabilty info supposed to go? */
      anv_finishme("VK_QUERY_RESULT_WITH_AVAILABILITY_BIT");
      return VK_UNSUPPORTED;
   }

   assert(pool->type == VK_QUERY_TYPE_OCCLUSION);

   if (flags & VK_QUERY_RESULT_64_BIT)
      *pDataSize = queryCount * sizeof(uint64_t);
   else
      *pDataSize = queryCount * sizeof(uint32_t);

   if (pData == NULL)
      return VK_SUCCESS;

   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
      ret = anv_gem_wait(device, pool->bo.gem_handle, &timeout);
      if (ret == -1)
         return vk_error(VK_ERROR_UNKNOWN);
   }

   for (uint32_t i = 0; i < queryCount; i++) {
      result = slot[startQuery + i].end - slot[startQuery + i].begin;
      if (flags & VK_QUERY_RESULT_64_BIT) {
         *dst64++ = result;
      } else {
         if (result > UINT32_MAX)
            result = UINT32_MAX;
         *dst32++ = result;
      }
   }

   return VK_SUCCESS;
}
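/* Assumption about the slot layout: begin/end are the two counter values
 * the command-buffer code writes at vkCmdBeginQuery()/vkCmdEndQuery()
 * (not shown in this file), so the subtraction above yields the samples
 * that passed between the two, saturated to UINT32_MAX for 32-bit results.
 */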
VkResult anv_CreateBuffer(
    VkDevice                                    _device,
    const VkBufferCreateInfo*                   pCreateInfo,
    VkBuffer*                                   pBuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->bo = NULL;
   buffer->offset = 0;

   *pBuffer = (VkBuffer) buffer;

   return VK_SUCCESS;
}
// Buffer view functions

VkResult anv_CreateBufferView(
    VkDevice                                    _device,
    const VkBufferViewCreateInfo*               pCreateInfo,
    VkBufferView*                               pView)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_buffer *buffer = (struct anv_buffer *) pCreateInfo->buffer;
   struct anv_surface_view *view;
   const struct anv_format *format;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   view = anv_device_alloc(device, sizeof(*view), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (view == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   view->bo = buffer->bo;
   view->offset = buffer->offset + pCreateInfo->offset;
   view->surface_state =
      anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
   view->format = pCreateInfo->format;

   format = anv_format_for_vk_format(pCreateInfo->format);
   /* This assumes RGBA float format. */
   uint32_t stride = 4;
   uint32_t num_elements = pCreateInfo->range / stride;
   struct GEN8_RENDER_SURFACE_STATE surface_state = {
      .SurfaceType = SURFTYPE_BUFFER,
      .SurfaceArray = false,
      .SurfaceFormat = format->format,
      .SurfaceVerticalAlignment = VALIGN4,
      .SurfaceHorizontalAlignment = HALIGN4,
      .VerticalLineStride = 0,
      .VerticalLineStrideOffset = 0,
      .SamplerL2BypassModeDisable = true,
      .RenderCacheReadWriteMode = WriteOnlyCache,
      .MemoryObjectControlState = 0, /* FIXME: MOCS */
      .Height = (num_elements >> 7) & 0x3fff,
      .Width = num_elements & 0x7f,
      .Depth = (num_elements >> 21) & 0x3f,
      .SurfacePitch = stride - 1,
      .MinimumArrayElement = 0,
      .NumberofMultisamples = MULTISAMPLECOUNT_1,
      .AuxiliarySurfaceMode = AUX_NONE,
      .GreenClearColor = 0,
      .BlueClearColor = 0,
      .AlphaClearColor = 0,
      .ShaderChannelSelectRed = SCS_RED,
      .ShaderChannelSelectGreen = SCS_GREEN,
      .ShaderChannelSelectBlue = SCS_BLUE,
      .ShaderChannelSelectAlpha = SCS_ALPHA,
      .ResourceMinLOD = 0,
      /* FIXME: We assume that the image must be bound at this time. */
      .SurfaceBaseAddress = { NULL, view->offset },
   };

   GEN8_RENDER_SURFACE_STATE_pack(NULL, view->surface_state.map, &surface_state);

   *pView = (VkBufferView) view;

   return VK_SUCCESS;
}
// Sampler functions

VkResult anv_CreateSampler(
    VkDevice                                    _device,
    const VkSamplerCreateInfo*                  pCreateInfo,
    VkSampler*                                  pSampler)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = anv_device_alloc(device, sizeof(*sampler), 8,
                              VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (sampler == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   static const uint32_t vk_to_gen_tex_filter[] = {
      [VK_TEX_FILTER_NEAREST] = MAPFILTER_NEAREST,
      [VK_TEX_FILTER_LINEAR] = MAPFILTER_LINEAR
   };

   static const uint32_t vk_to_gen_mipmap_mode[] = {
      [VK_TEX_MIPMAP_MODE_BASE] = MIPFILTER_NONE,
      [VK_TEX_MIPMAP_MODE_NEAREST] = MIPFILTER_NEAREST,
      [VK_TEX_MIPMAP_MODE_LINEAR] = MIPFILTER_LINEAR
   };

   static const uint32_t vk_to_gen_tex_address[] = {
      [VK_TEX_ADDRESS_WRAP] = TCM_WRAP,
      [VK_TEX_ADDRESS_MIRROR] = TCM_MIRROR,
      [VK_TEX_ADDRESS_CLAMP] = TCM_CLAMP,
      [VK_TEX_ADDRESS_MIRROR_ONCE] = TCM_MIRROR_ONCE,
      [VK_TEX_ADDRESS_CLAMP_BORDER] = TCM_CLAMP_BORDER,
   };

   static const uint32_t vk_to_gen_compare_op[] = {
      [VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER,
      [VK_COMPARE_OP_LESS] = PREFILTEROPLESS,
      [VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL,
      [VK_COMPARE_OP_LESS_EQUAL] = PREFILTEROPLEQUAL,
      [VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER,
      [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL,
      [VK_COMPARE_OP_GREATER_EQUAL] = PREFILTEROPGEQUAL,
      [VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS,
   };

   if (pCreateInfo->maxAnisotropy > 0)
      anv_finishme("missing support for anisotropic filtering");

   struct GEN8_SAMPLER_STATE sampler_state = {
      .SamplerDisable = false,
      .TextureBorderColorMode = DX10OGL,
      .LODPreClampMode = 0,
      .MipModeFilter = vk_to_gen_mipmap_mode[pCreateInfo->mipMode],
      .MagModeFilter = vk_to_gen_tex_filter[pCreateInfo->magFilter],
      .MinModeFilter = vk_to_gen_tex_filter[pCreateInfo->minFilter],
      .TextureLODBias = pCreateInfo->mipLodBias * 256,
      .AnisotropicAlgorithm = EWAApproximation,
      .MinLOD = pCreateInfo->minLod * 256,
      .MaxLOD = pCreateInfo->maxLod * 256,
      .ChromaKeyEnable = 0,
      .ChromaKeyIndex = 0,
      .ShadowFunction = vk_to_gen_compare_op[pCreateInfo->compareOp],
      .CubeSurfaceControlMode = 0,
      .IndirectStatePointer = 0,
      .LODClampMagnificationMode = MIPNONE,
      .MaximumAnisotropy = 0,
      .RAddressMinFilterRoundingEnable = 0,
      .RAddressMagFilterRoundingEnable = 0,
      .VAddressMinFilterRoundingEnable = 0,
      .VAddressMagFilterRoundingEnable = 0,
      .UAddressMinFilterRoundingEnable = 0,
      .UAddressMagFilterRoundingEnable = 0,
      .TrilinearFilterQuality = 0,
      .NonnormalizedCoordinateEnable = 0,
      .TCXAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressU],
      .TCYAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressV],
      .TCZAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressW],
   };

   GEN8_SAMPLER_STATE_pack(NULL, sampler->state, &sampler_state);

   *pSampler = (VkSampler) sampler;

   return VK_SUCCESS;
}
// Descriptor set functions

VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayout*                      pSetLayout)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t sampler_count[VK_NUM_SHADER_STAGE] = { 0, };
   uint32_t surface_count[VK_NUM_SHADER_STAGE] = { 0, };
   uint32_t num_dynamic_buffers = 0;
   uint32_t count = 0;
   uint32_t s;

   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            sampler_count[s] += pCreateInfo->pBinding[i].count;
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            sampler_count[s] += pCreateInfo->pBinding[i].count;

         /* fall through */

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            surface_count[s] += pCreateInfo->pBinding[i].count;
         break;

      default:
         break;
      }

      count += pCreateInfo->pBinding[i].count;
   }

   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         num_dynamic_buffers++;
         break;

      default:
         break;
      }
   }

   uint32_t sampler_total = 0;
   uint32_t surface_total = 0;
   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      sampler_total += sampler_count[s];
      surface_total += surface_count[s];
   }

   size_t size = sizeof(*set_layout) +
      (sampler_total + surface_total) * sizeof(uint32_t);
   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   set_layout->num_dynamic_buffers = num_dynamic_buffers;
   set_layout->count = count;

   uint32_t *p = set_layout->entries;
   uint32_t *sampler[VK_NUM_SHADER_STAGE];
   uint32_t *surface[VK_NUM_SHADER_STAGE];
   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      set_layout->stage[s].surface_count = surface_count[s];
      set_layout->stage[s].surface_start = surface[s] = p;
      p += surface_count[s];
      set_layout->stage[s].sampler_count = sampler_count[s];
      set_layout->stage[s].sampler_start = sampler[s] = p;
      p += sampler_count[s];
   }

   uint32_t descriptor = 0;
   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++)
               *(sampler[s])++ = descriptor + j;
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++)
               *(sampler[s])++ = descriptor + j;

         /* fall through */

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++) {
               *(surface[s])++ = descriptor + j;
            }
         break;

      default:
         break;
      }
      descriptor += pCreateInfo->pBinding[i].count;
   }

   *pSetLayout = (VkDescriptorSetLayout) set_layout;

   return VK_SUCCESS;
}
VkResult anv_BeginDescriptorPoolUpdate(
    VkDevice                                    device,
    VkDescriptorUpdateMode                      updateMode)
{
   return VK_SUCCESS;
}

VkResult anv_EndDescriptorPoolUpdate(
    VkDevice                                    device,
    VkCmdBuffer                                 cmd)
{
   return VK_SUCCESS;
}

VkResult anv_CreateDescriptorPool(
    VkDevice                                    device,
    VkDescriptorPoolUsage                       poolUsage,
    uint32_t                                    maxSets,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    VkDescriptorPool*                           pDescriptorPool)
{
   *pDescriptorPool = 1;

   return VK_SUCCESS;
}

VkResult anv_ResetDescriptorPool(
    VkDevice                                    device,
    VkDescriptorPool                            descriptorPool)
{
   return VK_SUCCESS;
}
VkResult anv_AllocDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorSetUsage                        setUsage,
    uint32_t                                    count,
    const VkDescriptorSetLayout*                pSetLayouts,
    VkDescriptorSet*                            pDescriptorSets,
    uint32_t*                                   pCount)
{
   struct anv_device *device = (struct anv_device *) _device;
   const struct anv_descriptor_set_layout *layout;
   struct anv_descriptor_set *set;
   size_t size;

   for (uint32_t i = 0; i < count; i++) {
      layout = (struct anv_descriptor_set_layout *) pSetLayouts[i];
      size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
      set = anv_device_alloc(device, size, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      if (!set) {
         *pCount = i;
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      pDescriptorSets[i] = (VkDescriptorSet) set;
   }

   *pCount = count;

   return VK_SUCCESS;
}
void anv_ClearDescriptorSets(
    VkDevice                                    device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
}
void anv_UpdateDescriptors(
    VkDevice                                    _device,
    VkDescriptorSet                             descriptorSet,
    uint32_t                                    updateCount,
    const void**                                ppUpdateArray)
{
   struct anv_descriptor_set *set = (struct anv_descriptor_set *) descriptorSet;
   VkUpdateSamplers *update_samplers;
   VkUpdateSamplerTextures *update_sampler_textures;
   VkUpdateImages *update_images;
   VkUpdateBuffers *update_buffers;
   VkUpdateAsCopy *update_as_copy;

   for (uint32_t i = 0; i < updateCount; i++) {
      const struct anv_common *common = ppUpdateArray[i];

      switch (common->sType) {
      case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
         update_samplers = (VkUpdateSamplers *) common;

         for (uint32_t j = 0; j < update_samplers->count; j++) {
            set->descriptors[update_samplers->binding + j].sampler =
               (struct anv_sampler *) update_samplers->pSamplers[j];
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
         /* FIXME: Shouldn't this be *_UPDATE_SAMPLER_IMAGES? */
         update_sampler_textures = (VkUpdateSamplerTextures *) common;

         for (uint32_t j = 0; j < update_sampler_textures->count; j++) {
            set->descriptors[update_sampler_textures->binding + j].view =
               (struct anv_surface_view *)
               update_sampler_textures->pSamplerImageViews[j].pImageView->view;
            set->descriptors[update_sampler_textures->binding + j].sampler =
               (struct anv_sampler *)
               update_sampler_textures->pSamplerImageViews[j].sampler;
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
         update_images = (VkUpdateImages *) common;

         for (uint32_t j = 0; j < update_images->count; j++) {
            set->descriptors[update_images->binding + j].view =
               (struct anv_surface_view *) update_images->pImageViews[j].view;
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
         update_buffers = (VkUpdateBuffers *) common;

         for (uint32_t j = 0; j < update_buffers->count; j++) {
            set->descriptors[update_buffers->binding + j].view =
               (struct anv_surface_view *) update_buffers->pBufferViews[j].view;
         }
         /* FIXME: descriptor arrays? */
         break;

      case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
         update_as_copy = (VkUpdateAsCopy *) common;
         (void) update_as_copy;
         break;

      default:
         break;
      }
   }
}
// State object functions

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}
VkResult anv_CreateDynamicViewportState(
    VkDevice                                    _device,
    const VkDynamicVpStateCreateInfo*           pCreateInfo,
    VkDynamicVpState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_vp_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   unsigned count = pCreateInfo->viewportAndScissorCount;
   state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
                                            count * 64, 64);
   state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
                                       count * 32, 32);
   state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
                                         count * 32, 32);

   for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
      const VkViewport *vp = &pCreateInfo->pViewports[i];
      const VkRect *s = &pCreateInfo->pScissors[i];

      struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
         .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
         .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
         .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->originX,
         .XMaxViewPort = vp->originX + vp->width - 1,
         .YMinViewPort = vp->originY,
         .YMaxViewPort = vp->originY + vp->height - 1,
      };

      struct GEN8_CC_VIEWPORT cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips. In case clip x, y, width height are all
       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
       * what we want. Just special case empty clips and produce a canonical
       * empty clip. */
      static const struct GEN8_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN8_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
      GEN8_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 32, &cc_viewport);

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
      } else {
         GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
      }
   }

   *pState = (VkDynamicVpState) state;

   return VK_SUCCESS;
}
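/* The viewport matrix above is the standard NDC-to-screen mapping written
 * out per element: x_screen = m00 * x_ndc + m30, so for example with
 * originX = 0 and width = 1920, x_ndc = -1 maps to 0 and x_ndc = 1 maps to
 * 1920. The depth row does the same for [minDepth, maxDepth].
 */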
VkResult anv_CreateDynamicRasterState(
    VkDevice                                    _device,
    const VkDynamicRsStateCreateInfo*           pCreateInfo,
    VkDynamicRsState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_rs_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Missing these:
    * float depthBiasClamp;
    * float slopeScaledDepthBias;
    * float pointFadeThreshold;
    * // optional (GL45) - Size of point fade threshold
    */

   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      .LineWidth = pCreateInfo->lineWidth,
      .PointWidth = pCreateInfo->pointSize,
   };

   GEN8_3DSTATE_SF_pack(NULL, state->state_sf, &sf);

   *pState = (VkDynamicRsState) state;

   return VK_SUCCESS;
}
VkResult anv_CreateDynamicColorBlendState(
    VkDevice                                    _device,
    const VkDynamicCbStateCreateInfo*           pCreateInfo,
    VkDynamicCbState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_cb_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *pState = (VkDynamicCbState) state;

   return VK_SUCCESS;
}
VkResult anv_CreateDynamicDepthStencilState(
    VkDevice                                    device,
    const VkDynamicDsStateCreateInfo*           pCreateInfo,
    VkDynamicDsState*                           pState)
{
   stub_return(VK_UNSUPPORTED);
}
// Command buffer functions

VkResult anv_CreateCommandBuffer(
    VkDevice                                    _device,
    const VkCmdBufferCreateInfo*                pCreateInfo,
    VkCmdBuffer*                                pCmdBuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->device = device;
   cmd_buffer->rs_state = NULL;
   cmd_buffer->vp_state = NULL;
   memset(&cmd_buffer->default_bindings, 0, sizeof(cmd_buffer->default_bindings));
   cmd_buffer->bindings = &cmd_buffer->default_bindings;

   result = anv_batch_init(&cmd_buffer->batch, device);
   if (result != VK_SUCCESS)
      goto fail;

   cmd_buffer->exec2_objects =
      anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_objects[0]), 8,
                       VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer->exec2_objects == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_batch;
   }

   cmd_buffer->exec2_bos =
      anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_bos[0]), 8,
                       VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer->exec2_bos == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_exec2_objects;
   }

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   cmd_buffer->dirty = 0;
   cmd_buffer->vb_dirty = 0;

   *pCmdBuffer = (VkCmdBuffer) cmd_buffer;

   return VK_SUCCESS;

 fail_exec2_objects:
   anv_device_free(device, cmd_buffer->exec2_objects);
 fail_batch:
   anv_batch_finish(&cmd_buffer->batch, device);
 fail:
   anv_device_free(device, cmd_buffer);

   return result;
}
VkResult anv_BeginCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    const VkCmdBufferBeginInfo*                 pBeginInfo)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_device *device = cmd_buffer->device;

   anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT,
                  .PipelineSelection = _3D);
   anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_SIP);

   anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_BASE_ADDRESS,
                  .GeneralStateBaseAddress = { NULL, 0 },
                  .GeneralStateBaseAddressModifyEnable = true,
                  .GeneralStateBufferSize = 0xfffff,
                  .GeneralStateBufferSizeModifyEnable = true,

                  .SurfaceStateBaseAddress = { &device->surface_state_block_pool.bo, 0 },
                  .SurfaceStateMemoryObjectControlState = 0, /* FIXME: MOCS */
                  .SurfaceStateBaseAddressModifyEnable = true,

                  .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
                  .DynamicStateBaseAddressModifyEnable = true,
                  .DynamicStateBufferSize = 0xfffff,
                  .DynamicStateBufferSizeModifyEnable = true,

                  .IndirectObjectBaseAddress = { NULL, 0 },
                  .IndirectObjectBaseAddressModifyEnable = true,
                  .IndirectObjectBufferSize = 0xfffff,
                  .IndirectObjectBufferSizeModifyEnable = true,

                  .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
                  .InstructionBaseAddressModifyEnable = true,
                  .InstructionBufferSize = 0xfffff,
                  .InstructionBuffersizeModifyEnable = true);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VF_STATISTICS,
                  .StatisticsEnable = true);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HS, .Enable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_TE, .TEEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
                  .ConstantBufferOffset = 0,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
                  .ConstantBufferOffset = 4,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
                  .ConstantBufferOffset = 8,
                  .ConstantBufferSize = 4);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_CHROMAKEY,
                  .ChromaKeyKillEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SBE_SWIZ);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);

   /* Hardcoded state: */
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER,
                  .SurfaceType = SURFTYPE_2D,
                  .SurfaceFormat = D16_UNORM,
                  .SurfaceBaseAddress = { NULL, 0 },
                  .HierarchicalDepthBufferEnable = 0);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_DEPTH_STENCIL,
                  .DepthTestEnable = false,
                  .DepthBufferWriteEnable = false);

   return VK_SUCCESS;
}
static void
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo, struct anv_reloc_list *list)
{
   struct drm_i915_gem_exec_object2 *obj;

   bo->index = cmd_buffer->bo_count;
   obj = &cmd_buffer->exec2_objects[bo->index];
   cmd_buffer->exec2_bos[bo->index] = bo;
   cmd_buffer->bo_count++;

   obj->handle = bo->gem_handle;
   obj->relocation_count = 0;
   obj->relocs_ptr = 0;
   obj->alignment = 0;
   obj->offset = bo->offset;
   obj->flags = 0;
   obj->rsvd1 = 0;
   obj->rsvd2 = 0;

   if (list) {
      obj->relocation_count = list->num_relocs;
      obj->relocs_ptr = (uintptr_t) list->relocs;
   }
}
static void
anv_cmd_buffer_add_validate_bos(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_reloc_list *list)
{
   struct anv_bo *bo, *batch_bo;

   batch_bo = &cmd_buffer->batch.bo;
   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      /* Skip any relocations targeting the batch bo. We need to make sure
       * it's the last in the list so we'll add it manually later. */
      if (bo == batch_bo)
         continue;

      if (bo->index < cmd_buffer->bo_count && cmd_buffer->exec2_bos[bo->index] == bo)
         continue;

      anv_cmd_buffer_add_bo(cmd_buffer, bo, NULL);
   }
}
static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
    * struct drm_i915_gem_exec_object2 against the bos current offset and if
    * all bos haven't moved it will skip relocation processing altogether.
    * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
    * value of offset so we can set it either way.  For that to work we need
    * to make sure all relocs use the same presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}
VkResult anv_EndCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_device *device = cmd_buffer->device;
   struct anv_batch *batch = &cmd_buffer->batch;

   anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_END);

   /* Round batch up to an even number of dwords. */
   if ((batch->next - batch->bo.map) & 4)
      anv_batch_emit(batch, GEN8_MI_NOOP);

   cmd_buffer->bo_count = 0;
   cmd_buffer->need_reloc = false;

   /* Lock for access to bo->index. */
   pthread_mutex_lock(&device->mutex);

   /* Add block pool bos first so we can add them with their relocs. */
   anv_cmd_buffer_add_bo(cmd_buffer, &device->surface_state_block_pool.bo,
                         &batch->surf_relocs);

   anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->surf_relocs);
   anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->cmd_relocs);
   anv_cmd_buffer_add_bo(cmd_buffer, &batch->bo, &batch->cmd_relocs);
   anv_cmd_buffer_process_relocs(cmd_buffer, &batch->surf_relocs);
   anv_cmd_buffer_process_relocs(cmd_buffer, &batch->cmd_relocs);

   cmd_buffer->execbuf.buffers_ptr = (uintptr_t) cmd_buffer->exec2_objects;
   cmd_buffer->execbuf.buffer_count = cmd_buffer->bo_count;
   cmd_buffer->execbuf.batch_start_offset = 0;
   cmd_buffer->execbuf.batch_len = batch->next - batch->bo.map;
   cmd_buffer->execbuf.cliprects_ptr = 0;
   cmd_buffer->execbuf.num_cliprects = 0;
   cmd_buffer->execbuf.DR1 = 0;
   cmd_buffer->execbuf.DR4 = 0;

   cmd_buffer->execbuf.flags = I915_EXEC_HANDLE_LUT;
   if (!cmd_buffer->need_reloc)
      cmd_buffer->execbuf.flags |= I915_EXEC_NO_RELOC;
   cmd_buffer->execbuf.flags |= I915_EXEC_RENDER;
   cmd_buffer->execbuf.rsvd1 = device->context_id;
   cmd_buffer->execbuf.rsvd2 = 0;

   pthread_mutex_unlock(&device->mutex);

   return VK_SUCCESS;
}
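/* Ordering note: execbuffer2 treats the last object in buffers_ptr as the
 * batch buffer itself, which is why the batch bo is added last above and
 * why anv_cmd_buffer_add_validate_bos() deliberately skips it.
 */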
VkResult anv_ResetCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_batch_reset(&cmd_buffer->batch);

   return VK_SUCCESS;
}
// Command buffer building functions
void anv_CmdBindPipeline(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   cmd_buffer->pipeline = (struct anv_pipeline *) _pipeline;
   cmd_buffer->dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
}
void anv_CmdBindDynamicStateObject(
    VkCmdBuffer                                 cmdBuffer,
    VkStateBindPoint                            stateBindPoint,
    VkDynamicStateObject                        dynamicState)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_dynamic_vp_state *vp_state;

   switch (stateBindPoint) {
   case VK_STATE_BIND_POINT_VIEWPORT:
      vp_state = (struct anv_dynamic_vp_state *) dynamicState;

      /* We emit state immediately, but set cmd_buffer->vp_state to indicate
       * that vp state has been set in this command buffer. */
      cmd_buffer->vp_state = vp_state;
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
                     .ScissorRectPointer = vp_state->scissor.offset);
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
                     .CCViewportPointer = vp_state->cc_vp.offset);
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
                     .SFClipViewportPointer = vp_state->sf_clip_vp.offset);
      break;

   case VK_STATE_BIND_POINT_RASTER:
      cmd_buffer->rs_state = (struct anv_dynamic_rs_state *) dynamicState;
      cmd_buffer->dirty |= ANV_CMD_BUFFER_RS_DIRTY;
      break;

   case VK_STATE_BIND_POINT_COLOR_BLEND:
   case VK_STATE_BIND_POINT_DEPTH_STENCIL:
   default:
      break;
   }
}
void anv_CmdBindDescriptorSets(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    firstSet,
    uint32_t                                    setCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
   struct anv_bindings *bindings = cmd_buffer->bindings;

   uint32_t offset = 0;
   for (uint32_t i = 0; i < setCount; i++) {
      struct anv_descriptor_set *set =
         (struct anv_descriptor_set *) pDescriptorSets[i];
      struct anv_descriptor_set_layout *set_layout = layout->set[firstSet + i].layout;

      for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
         uint32_t *surface_to_desc = set_layout->stage[s].surface_start;
         uint32_t *sampler_to_desc = set_layout->stage[s].sampler_start;
         uint32_t bias = s == VK_SHADER_STAGE_FRAGMENT ? MAX_RTS : 0;
         uint32_t start;

         start = bias + layout->set[firstSet + i].surface_start[s];
         for (uint32_t b = 0; b < set_layout->stage[s].surface_count; b++) {
            struct anv_surface_view *view = set->descriptors[surface_to_desc[b]].view;

            bindings->descriptors[s].surfaces[start + b] =
               view->surface_state.offset;
            bindings->descriptors[s].relocs[start + b].bo = view->bo;
            bindings->descriptors[s].relocs[start + b].offset = view->offset;
         }

         start = layout->set[firstSet + i].sampler_start[s];
         for (uint32_t b = 0; b < set_layout->stage[s].sampler_count; b++) {
            struct anv_sampler *sampler = set->descriptors[sampler_to_desc[b]].sampler;

            memcpy(&bindings->descriptors[s].samplers[start + b],
                   sampler->state, sizeof(sampler->state));
         }
      }

      offset += layout->set[firstSet + i].layout->num_dynamic_buffers;
   }

   cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
}
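
/* For the fragment stage, the first MAX_RTS binding table slots are
 * reserved for render targets (filled in by
 * anv_cmd_buffer_fill_render_targets()), which is why descriptor surfaces
 * are written starting at bias = MAX_RTS above.
 */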
void anv_CmdBindIndexBuffer(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT8] = INDEX_BYTE,
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_INDEX_BUFFER,
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = 0,
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);
}
void anv_CmdBindVertexBuffers(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    startBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_bindings *bindings = cmd_buffer->bindings;

   /* We have to defer setting up vertex buffers since we need the buffer
    * stride from the pipeline. */

   for (uint32_t i = 0; i < bindingCount; i++) {
      bindings->vb[startBinding + i].buffer = (struct anv_buffer *) pBuffers[i];
      bindings->vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->vb_dirty |= 1 << (startBinding + i);
   }
}
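
/* vb_dirty is a per-binding bitmask: bit n set means vertex buffer binding n
 * changed since the last flush. anv_cmd_buffer_flush_state() walks the set
 * bits with for_each_bit() and emits one 3DSTATE_VERTEX_BUFFERS entry per
 * dirty binding, picking up the stride from the pipeline bound at draw time.
 */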
static void
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
   struct anv_bindings *bindings = cmd_buffer->bindings;
   uint32_t layers = cmd_buffer->framebuffer->layers;

   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      uint32_t bias;

      if (s == VK_SHADER_STAGE_FRAGMENT) {
         bias = MAX_RTS;
         layers = cmd_buffer->framebuffer->layers;
      } else {
         bias = 0;
         layers = 0;
      }

      /* This is a little awkward: layout can be NULL but we still have to
       * allocate and set a binding table for the PS stage for render
       * targets. */
      uint32_t surface_count = layout ? layout->stage[s].surface_count : 0;

      if (layers + surface_count > 0) {
         struct anv_state state;
         uint32_t size;

         size = (bias + surface_count) * sizeof(uint32_t);
         state = anv_state_stream_alloc(&cmd_buffer->surface_state_stream, size, 32);
         memcpy(state.map, bindings->descriptors[s].surfaces, size);

         for (uint32_t i = 0; i < layers; i++)
            anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
                               bindings->descriptors[s].surfaces[i] + 8 * sizeof(int32_t),
                               bindings->descriptors[s].relocs[i].bo,
                               bindings->descriptors[s].relocs[i].offset);

         for (uint32_t i = 0; i < surface_count; i++)
            anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
                               bindings->descriptors[s].surfaces[bias + i] + 8 * sizeof(int32_t),
                               bindings->descriptors[s].relocs[bias + i].bo,
                               bindings->descriptors[s].relocs[bias + i].offset);

         static const uint32_t binding_table_opcodes[] = {
            [VK_SHADER_STAGE_VERTEX] = 38,
            [VK_SHADER_STAGE_TESS_CONTROL] = 39,
            [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
            [VK_SHADER_STAGE_GEOMETRY] = 41,
            [VK_SHADER_STAGE_FRAGMENT] = 42,
            [VK_SHADER_STAGE_COMPUTE] = 0,
         };

         anv_batch_emit(&cmd_buffer->batch,
                        GEN8_3DSTATE_BINDING_TABLE_POINTERS_VS,
                        ._3DCommandSubOpcode = binding_table_opcodes[s],
                        .PointertoVSBindingTable = state.offset);
      }

      if (layout && layout->stage[s].sampler_count > 0) {
         struct anv_state state;
         uint32_t size;

         size = layout->stage[s].sampler_count * 16;
         state = anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream, size, 32);
         memcpy(state.map, bindings->descriptors[s].samplers, size);

         static const uint32_t sampler_state_opcodes[] = {
            [VK_SHADER_STAGE_VERTEX] = 43,
            [VK_SHADER_STAGE_TESS_CONTROL] = 44, /* HS */
            [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
            [VK_SHADER_STAGE_GEOMETRY] = 46,
            [VK_SHADER_STAGE_FRAGMENT] = 47,
            [VK_SHADER_STAGE_COMPUTE] = 0,
         };

         anv_batch_emit(&cmd_buffer->batch,
                        GEN8_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                        ._3DCommandSubOpcode = sampler_state_opcodes[s],
                        .PointertoVSSamplerState = state.offset);
      }
   }
}
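
/* The per-stage 3DSTATE_BINDING_TABLE_POINTERS_* and
 * 3DSTATE_SAMPLER_STATE_POINTERS_* commands share a single layout and differ
 * only in their sub-opcode, so the VS template is packed once and the
 * _3DCommandSubOpcode patched from the tables above for the other stages.
 */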
static void
anv_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->pipeline;
   struct anv_bindings *bindings = cmd_buffer->bindings;
   const uint32_t num_buffers = __builtin_popcount(cmd_buffer->vb_dirty);
   const uint32_t num_dwords = 1 + num_buffers * 4;
   uint32_t *p;

   if (cmd_buffer->vb_dirty) {
      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GEN8_3DSTATE_VERTEX_BUFFERS);
      uint32_t vb, i = 0;
      for_each_bit(vb, cmd_buffer->vb_dirty) {
         struct anv_buffer *buffer = bindings->vb[vb].buffer;
         uint32_t offset = bindings->vb[vb].offset;

         struct GEN8_VERTEX_BUFFER_STATE state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = 0,
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset
         };

         GEN8_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   if (cmd_buffer->dirty & ANV_CMD_BUFFER_PIPELINE_DIRTY)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if (cmd_buffer->dirty & ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY)
      flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_RS_DIRTY))
      anv_batch_emit_merge(&cmd_buffer->batch,
                           cmd_buffer->rs_state->state_sf, pipeline->state_sf);

   cmd_buffer->vb_dirty = 0;
   cmd_buffer->dirty = 0;
}
void anv_CmdDraw(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    firstVertex,
    uint32_t                                    vertexCount,
    uint32_t                                    firstInstance,
    uint32_t                                    instanceCount)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .VertexAccessType = SEQUENTIAL,
                  .VertexCountPerInstance = vertexCount,
                  .StartVertexLocation = firstVertex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = 0);
}
void anv_CmdDrawIndexed(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    firstIndex,
    uint32_t                                    indexCount,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance,
    uint32_t                                    instanceCount)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .VertexAccessType = RANDOM,
                  .VertexCountPerInstance = indexCount,
                  .StartVertexLocation = firstIndex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = vertexOffset);
}
static void
anv_batch_lrm(struct anv_batch *batch,
              uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
}
static void
anv_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_IMM,
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}
/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440
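
/* The indirect draw paths below load these registers straight from the
 * indirect buffer with MI_LOAD_REGISTER_MEM: vertex count at +0, instance
 * count at +4, start vertex at +8 and start instance at +12, matching the
 * dword layout of the API's indirect draw arguments.
 */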
void anv_CmdDrawIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    count,
    uint32_t                                    stride)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   anv_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = SEQUENTIAL);
}
void anv_CmdDrawIndexedIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    count,
    uint32_t                                    stride)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = RANDOM);
}
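
/* The indexed variant consumes one extra dword: index count, instance count
 * and first index at offsets 0 through 8 as above, then base vertex at +12
 * (loaded from memory rather than forced to zero) and start instance at +16.
 */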
void anv_CmdDispatch(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   stub();
}

void anv_CmdDispatchIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    buffer,
    VkDeviceSize                                offset)
{
   stub();
}

void anv_CmdSetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipeEvent                                 pipeEvent)
{
   stub();
}

void anv_CmdResetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipeEvent                                 pipeEvent)
{
   stub();
}

void anv_CmdWaitEvents(
    VkCmdBuffer                                 cmdBuffer,
    VkWaitEvent                                 waitEvent,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    uint32_t                                    memBarrierCount,
    const void**                                ppMemBarriers)
{
   stub();
}

void anv_CmdPipelineBarrier(
    VkCmdBuffer                                 cmdBuffer,
    VkWaitEvent                                 waitEvent,
    uint32_t                                    pipeEventCount,
    const VkPipeEvent*                          pPipeEvents,
    uint32_t                                    memBarrierCount,
    const void**                                ppMemBarriers)
{
   stub();
}
static void
anv_batch_emit_ps_depth_count(struct anv_batch *batch,
                              struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN8_PIPE_CONTROL,
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .Address = { bo, offset }); /* FIXME: This is only lower 32 bits */
}
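
/* Occlusion queries bracket the workload with two of these depth-count
 * writes: anv_CmdBeginQuery stores to the first qword of the slot and
 * anv_CmdEndQuery to the second (offset + 8), so the query result is simply
 * end minus begin, computed on the GPU in anv_CmdCopyQueryPoolResults().
 */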
void anv_CmdBeginQuery(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    slot,
    VkQueryControlFlags                         flags)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                                    slot * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      break;
   }
}
void anv_CmdEndQuery(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    slot)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                                    slot * sizeof(struct anv_query_pool_slot) + 8);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      break;
   }
}
void anv_CmdResetQueryPool(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount)
{
   stub();
}
#define TIMESTAMP 0x44070
void anv_CmdWriteTimestamp(
    VkCmdBuffer                                 cmdBuffer,
    VkTimestampType                             timestampType,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
   struct anv_bo *bo = buffer->bo;

   switch (timestampType) {
   case VK_TIMESTAMP_TYPE_TOP:
      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { bo, buffer->offset + destOffset });
      break;

   case VK_TIMESTAMP_TYPE_BOTTOM:
      anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = /* FIXME: This is only lower 32 bits */
                        { bo, buffer->offset + destOffset });
      break;

   default:
      break;
   }
}
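
/* TOP writes the timestamp as soon as the command streamer parses the
 * MI_STORE_REGISTER_MEM, while BOTTOM uses a PIPE_CONTROL post-sync write,
 * which doesn't land until previously issued work has drained the pipeline.
 */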
#define alu_opcode(v)   __gen_field((v), 20, 31)
#define alu_operand1(v) __gen_field((v), 10, 19)
#define alu_operand2(v) __gen_field((v), 0, 9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP      0x000
#define OPCODE_LOAD      0x080
#define OPCODE_LOADINV   0x480
#define OPCODE_LOAD0     0x081
#define OPCODE_LOAD1     0x481
#define OPCODE_ADD       0x100
#define OPCODE_SUB       0x101
#define OPCODE_AND       0x102
#define OPCODE_OR        0x103
#define OPCODE_XOR       0x104
#define OPCODE_STORE     0x180
#define OPCODE_STOREINV  0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
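
/* Each MI_MATH instruction is one dword: opcode in bits 31:20, operand 1 in
 * bits 19:10 and operand 2 in bits 9:0. The command streamer's sixteen
 * 64-bit general purpose registers (R0-R15) live at 0x2600, 8 bytes apart,
 * hence the n * 8 stride in CS_GPR().
 */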
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
   anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
                  .RegisterAddress = reg + 4,
                  .MemoryAddress = { bo, offset + 4 });
}
void anv_CmdCopyQueryPoolResults(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
   struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
      /* Where is the availability info supposed to go? */
      anv_finishme("VK_QUERY_RESULT_WITH_AVAILABILITY_BIT");
      return;
   }

   assert(pool->type == VK_QUERY_TYPE_OCCLUSION);

   /* FIXME: If we're not waiting, should we just do this on the CPU? */
   if (flags & VK_QUERY_RESULT_WAIT_BIT)
      anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
                     .CommandStreamerStallEnable = true);

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {
      slot_offset = (startQuery + i) * sizeof(struct anv_query_pool_slot);

      emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0), &pool->bo, slot_offset);
      emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(1), &pool->bo, slot_offset + 8);

      /* FIXME: We need to clamp the result for 32 bit. */

      /* MI_MATH: compute R2 = R1 - R0, i.e. end count minus begin count. */
      uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GEN8_MI_MATH);
      dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
      dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
      dw[3] = alu(OPCODE_SUB, 0, 0); /* ACCU = SRCA - SRCB */
      dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);

      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
                     .RegisterAddress = CS_GPR(2),
                     /* FIXME: This is only lower 32 bits */
                     .MemoryAddress = { buffer->bo, dst_offset });

      if (flags & VK_QUERY_RESULT_64_BIT)
         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
                        .RegisterAddress = CS_GPR(2) + 4,
                        /* FIXME: This is only lower 32 bits */
                        .MemoryAddress = { buffer->bo, dst_offset + 4 });

      dst_offset += destStride;
   }
}
void anv_CmdInitAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    const uint32_t*                             pData)
{
   stub();
}

void anv_CmdLoadAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    VkBuffer                                    srcBuffer,
    VkDeviceSize                                srcOffset)
{
   stub();
}

void anv_CmdSaveAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset)
{
   stub();
}
VkResult anv_CreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    VkFramebuffer*                              pFramebuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   framebuffer = anv_device_alloc(device, sizeof(*framebuffer), 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->color_attachment_count = pCreateInfo->colorAttachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->colorAttachmentCount; i++) {
      framebuffer->color_attachments[i] =
         (struct anv_surface_view *) pCreateInfo->pColorAttachments[i].view;
   }

   if (pCreateInfo->pDepthStencilAttachment) {
      framebuffer->depth_stencil =
         (struct anv_depth_stencil_view *) pCreateInfo->pDepthStencilAttachment->view;
   }

   framebuffer->sample_count = pCreateInfo->sampleCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   vkCreateDynamicViewportState((VkDevice) device,
      &(VkDynamicVpStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO,
         .viewportAndScissorCount = 2,
         .pViewports = (VkViewport[]) {
            {
               .originX = 0,
               .originY = 0,
               .width = pCreateInfo->width,
               .height = pCreateInfo->height,
               .minDepth = 0,
               .maxDepth = 1
            },
         },
         .pScissors = (VkRect[]) {
            { { 0, 0 },
              { pCreateInfo->width, pCreateInfo->height } },
         }
      },
      &framebuffer->vp_state);

   *pFramebuffer = (VkFramebuffer) framebuffer;

   return VK_SUCCESS;
}
VkResult anv_CreateRenderPass(
    VkDevice                                    _device,
    const VkRenderPassCreateInfo*               pCreateInfo,
    VkRenderPass*                               pRenderPass)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_render_pass *pass;
   size_t size;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);

   size = sizeof(*pass) +
      pCreateInfo->layers * sizeof(struct anv_render_pass_layer);
   pass = anv_device_alloc(device, size, 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pass == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pass->render_area = pCreateInfo->renderArea;

   pass->num_layers = pCreateInfo->layers;

   pass->num_clear_layers = 0;
   for (uint32_t i = 0; i < pCreateInfo->layers; i++) {
      pass->layers[i].color_load_op = pCreateInfo->pColorLoadOps[i];
      pass->layers[i].clear_color = pCreateInfo->pColorLoadClearValues[i];
      if (pass->layers[i].color_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         pass->num_clear_layers++;
   }

   *pRenderPass = (VkRenderPass) pass;

   return VK_SUCCESS;
}
static void
anv_cmd_buffer_fill_render_targets(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *framebuffer = cmd_buffer->framebuffer;
   struct anv_bindings *bindings = cmd_buffer->bindings;

   for (uint32_t i = 0; i < framebuffer->color_attachment_count; i++) {
      struct anv_surface_view *view = framebuffer->color_attachments[i];

      bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].surfaces[i] =
         view->surface_state.offset;
      bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].relocs[i].bo = view->bo;
      bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].relocs[i].offset = view->offset;
   }

   cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
}
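
/* These surfaces land in binding table slots [0, color_attachment_count),
 * the render target region below the MAX_RTS bias that
 * flush_descriptor_sets() keeps clear of descriptor surfaces for the
 * fragment stage.
 */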
void anv_CmdBeginRenderPass(
    VkCmdBuffer                                 cmdBuffer,
    const VkRenderPassBegin*                    pRenderPassBegin)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_render_pass *pass =
      (struct anv_render_pass *) pRenderPassBegin->renderPass;
   struct anv_framebuffer *framebuffer =
      (struct anv_framebuffer *) pRenderPassBegin->framebuffer;

   cmd_buffer->framebuffer = framebuffer;

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DRAWING_RECTANGLE,
                  .ClippedDrawingRectangleYMin = pass->render_area.offset.y,
                  .ClippedDrawingRectangleXMin = pass->render_area.offset.x,
                  .ClippedDrawingRectangleYMax =
                     pass->render_area.offset.y + pass->render_area.extent.height - 1,
                  .ClippedDrawingRectangleXMax =
                     pass->render_area.offset.x + pass->render_area.extent.width - 1,
                  .DrawingRectangleOriginY = 0,
                  .DrawingRectangleOriginX = 0);

   anv_cmd_buffer_fill_render_targets(cmd_buffer);

   anv_cmd_buffer_clear(cmd_buffer, pass);
}
void anv_CmdEndRenderPass(
    VkCmdBuffer                                 cmdBuffer,
    VkRenderPass                                renderPass)
{
   /* Emit a flushing pipe control at the end of a pass. This is kind of a
    * hack but it ensures that render targets always actually get written.
    * Eventually, we should do flushing based on image format transitions
    * or something of that nature. */
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
                  .PostSyncOperation = NoWrite,
                  .RenderTargetCacheFlushEnable = true,
                  .InstructionCacheInvalidateEnable = true,
                  .DepthCacheFlushEnable = true,
                  .VFCacheInvalidationEnable = true,
                  .TextureCacheInvalidationEnable = true,
                  .CommandStreamerStallEnable = true);
}