}
}
-void anv_CmdSetEvent(
- VkCommandBuffer commandBuffer,
- VkEvent event,
- VkPipelineStageFlags stageMask)
-{
- stub();
-}
-
-void anv_CmdResetEvent(
- VkCommandBuffer commandBuffer,
- VkEvent event,
- VkPipelineStageFlags stageMask)
-{
- stub();
-}
-
-void anv_CmdWaitEvents(
- VkCommandBuffer commandBuffer,
- uint32_t eventCount,
- const VkEvent* pEvents,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags destStageMask,
- uint32_t memBarrierCount,
- const void* const* ppMemBarriers)
-{
- stub();
-}
-
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage)
// Event functions
VkResult anv_CreateEvent(
- VkDevice device,
+ VkDevice _device,
const VkEventCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkEvent* pEvent)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_state state;
+ struct anv_event *event;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
+
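+ /* Allocate the event in the dynamic state pool so the GPU can write
+ * its status with a PIPE_CONTROL post-sync write and poll it with
+ * MI_SEMAPHORE_WAIT.
+ */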
+ state = anv_state_pool_alloc(&device->dynamic_state_pool,
+ sizeof(*event), 8);
+ event = state.map;
+ event->state = state;
+ event->semaphore = VK_EVENT_RESET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ *pEvent = anv_event_to_handle(event);
+
+ return VK_SUCCESS;
}
void anv_DestroyEvent(
- VkDevice device,
- VkEvent event,
+ VkDevice _device,
+ VkEvent _event,
const VkAllocationCallbacks* pAllocator)
{
- stub();
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ anv_state_pool_free(&device->dynamic_state_pool, event->state);
}
VkResult anv_GetEventStatus(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ if (!device->info.has_llc) {
+ /* Invalidate the cache line before reading the status the GPU may
+ * have written.
+ */
+ __builtin_ia32_clflush(event);
+ __builtin_ia32_lfence();
+ }
+
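+ /* VK_EVENT_SET and VK_EVENT_RESET are themselves VkResult success
+ * codes, so the stored status can be returned directly.
+ */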
+ return event->semaphore;
}
VkResult anv_SetEvent(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
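+ /* Host-side set: write the status directly from the CPU. */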
+ event->semaphore = VK_EVENT_SET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ return VK_SUCCESS;
}
VkResult anv_ResetEvent(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ event->semaphore = VK_EVENT_RESET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ return VK_SUCCESS;
}
// Buffer functions
bool ready;
};
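+/* A GPU-visible event. The GPU updates the status with a PIPE_CONTROL
+ * post-sync immediate-data write (a qword write, hence uint64_t) and
+ * waits on it with MI_SEMAPHORE_WAIT; the host reads and writes the
+ * mapped value directly.
+ */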
+struct anv_event {
+ uint64_t semaphore;
+ struct anv_state state;
+};
+
struct nir_shader;
struct anv_shader_module {
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView);
.TextureCacheInvalidationEnable = true,
.CommandStreamerStallEnable = true);
}
+
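+/* Events are not implemented for this generation yet; these entry
+ * points remain stubs.
+ */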
+void genX(CmdSetEvent)(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask)
+{
+ stub();
+}
+
+void genX(CmdResetEvent)(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask)
+{
+ stub();
+}
+
+void genX(CmdWaitEvents)(
+ VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ uint32_t memBarrierCount,
+ const void* const* ppMemBarriers)
+{
+ stub();
+}
dst_offset += destStride;
}
}
+
+void genX(CmdSetEvent)(
+ VkCommandBuffer commandBuffer,
+ VkEvent _event,
+ VkPipelineStageFlags stageMask)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
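+ /* Signal from the GPU: a PIPE_CONTROL post-sync operation writes
+ * VK_EVENT_SET to the event's slot in the dynamic state pool.
+ */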
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+ .DestinationAddressType = DAT_PPGTT,
+ .PostSyncOperation = WriteImmediateData,
+ .Address = {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ },
+ .ImmediateData = VK_EVENT_SET);
+}
+
+void genX(CmdResetEvent)(
+ VkCommandBuffer commandBuffer,
+ VkEvent _event,
+ VkPipelineStageFlags stageMask)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
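+ /* Same post-sync write as CmdSetEvent, but storing VK_EVENT_RESET. */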
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+ .DestinationAddressType = DAT_PPGTT,
+ .PostSyncOperation = WriteImmediateData,
+ .Address = {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ },
+ .ImmediateData = VK_EVENT_RESET);
+}
+
+void genX(CmdWaitEvents)(
+ VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ uint32_t memBarrierCount,
+ const void* const* ppMemBarriers)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
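+
+ /* Stall the command streamer on each event, polling its memory
+ * location until it reads back VK_EVENT_SET.
+ */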
+ for (uint32_t i = 0; i < eventCount; i++) {
+ ANV_FROM_HANDLE(anv_event, event, pEvents[i]);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
+ .WaitMode = PollingMode,
+ .CompareOperation = SAD_EQUAL_SDD,
+ .SemaphoreDataDword = VK_EVENT_SET,
+ .SemaphoreAddress = {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ });
+ }
+
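+ /* Once every event has signaled, apply the memory barriers as a
+ * regular pipeline barrier.
+ */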
+ genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
+ false, /* byRegion */
+ memBarrierCount, ppMemBarriers);
+}