endif()
include_directories(${LLVM_INCLUDE_DIRS})
add_definitions(${LLVM_DEFINITIONS})
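+# the device queue now runs submitted work on a std::thread, so pull in the platform thread library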
+set(CMAKE_THREAD_PREFER_PTHREAD 1)
+set(THREADS_PREFER_PTHREAD_FLAG 1)
+find_package(Threads REQUIRED)
if(WIN32)
add_definitions(-DNOMINMAX)
endif()
set(sources vulkan.cpp
api_objects.cpp)
add_library(kazan_vulkan STATIC ${sources})
+target_link_libraries(kazan_vulkan
+ Threads::Threads)
target_compile_definitions(kazan_vulkan PUBLIC VK_NO_PROTOTYPES)
if(UNIX AND NOT CYGWIN AND NOT CMAKE_SYSTEM_NAME STREQUAL "Android")
set(USE_X11 1)
assert(descriptor.format == VK_FORMAT_B8G8R8A8_UNORM && "unimplemented image format");
assert(descriptor.mip_levels == 1 && "mipmapping is unimplemented");
assert(descriptor.array_layers == 1 && "array images are unimplemented");
- assert(descriptor.tiling == VK_IMAGE_TILING_LINEAR
- && "non-linear image tiling is unimplemented");
+#warning implement non-linear image tiling
+
union
{
std::uint8_t bytes[4];
#include "util/system_memory_info.h"
#include "util/constexpr_array.h"
#include "util/optional.h"
+#include "util/circular_queue.h"
#include <memory>
#include <cassert>
#include <chrono>
#include <mutex>
#include <condition_variable>
+#include <thread>
namespace kazan
{
#warning finish implementing Vulkan_instance
};
-struct Vulkan_device : public Vulkan_dispatchable_object<Vulkan_device, VkDevice>
-{
- struct Queue : public Vulkan_dispatchable_object<Queue, VkQueue>
- {
- Vulkan_instance &instance;
- Vulkan_physical_device &physical_device;
- Vulkan_device &device;
- explicit Queue(Vulkan_device &device) noexcept : instance(device.instance),
- physical_device(device.physical_device),
- device(device)
- {
- }
- };
- Vulkan_instance &instance;
- Vulkan_physical_device &physical_device;
- VkPhysicalDeviceFeatures enabled_features;
- static constexpr std::size_t queue_count = 1;
- Queue queues[queue_count];
- Supported_extensions extensions; // includes both device and instance extensions
- explicit Vulkan_device(Vulkan_physical_device &physical_device,
- const VkPhysicalDeviceFeatures &enabled_features,
- const Supported_extensions &extensions) noexcept
- : instance(physical_device.instance),
- physical_device(physical_device),
- enabled_features(enabled_features),
- queues{Queue(*this)},
- extensions(extensions)
- {
- }
- void wait_idle()
- {
-#warning implement Vulkan_device::wait_idle
- }
- static util::variant<std::unique_ptr<Vulkan_device>, VkResult> create(
- Vulkan_physical_device &physical_device, const VkDeviceCreateInfo &create_info);
-};
+struct Vulkan_device;
struct Vulkan_semaphore : public Vulkan_nondispatchable_object<Vulkan_semaphore, VkSemaphore>
{
    void signal() // intentionally empty; placeholder in case semaphores need real state later
    {
    }
+    void wait() // intentionally empty; placeholder in case semaphores need real state later
+    {
+    }
static std::unique_ptr<Vulkan_semaphore> create(Vulkan_device &device,
const VkSemaphoreCreateInfo &create_info);
};
const VkFence *fences,
bool wait_for_all,
std::uint64_t timeout);
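+    // Convenience wrapper: waits on just this fence by forwarding to wait_multiple().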
+ VkResult wait(std::uint64_t timeout)
+ {
+ constexpr std::size_t fence_count = 1;
+ VkFence fences[fence_count] = {
+ to_handle(this),
+ };
+ return wait_multiple(fence_count, fences, true, timeout);
+ }
static std::unique_ptr<Vulkan_fence> create(Vulkan_device &device,
const VkFenceCreateInfo &create_info);
};
+struct Vulkan_device : public Vulkan_dispatchable_object<Vulkan_device, VkDevice>
+{
+ struct Job
+ {
+ virtual ~Job() = default;
+ virtual void run() noexcept = 0;
+ };
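+    // Queue runs submitted Jobs in FIFO order on a single executor thread; queue_job()
+    // blocks while the bounded job deque is full, and wait_idle() blocks until all
+    // queued work has finished running.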
+ class Queue : public Vulkan_dispatchable_object<Queue, VkQueue>
+ {
+ private:
+ std::mutex mutex;
+ std::condition_variable cond;
+ util::Static_circular_deque<std::unique_ptr<Job>, 0x10> jobs;
+ std::thread executor_thread;
+ bool quit;
+ bool running_job;
+
+ private:
+ void thread_fn() noexcept
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ while(true)
+ {
+ if(jobs.empty())
+ {
+ if(quit)
+ return;
+ cond.wait(lock);
+ continue;
+ }
+ auto job = std::move(jobs.front());
+ bool was_full = jobs.full();
+ jobs.pop_front();
+ if(was_full)
+ cond.notify_all();
+ running_job = true;
+ lock.unlock();
+ job->run();
+ lock.lock();
+                running_job = false;
+                // notify wait_idle() waiters once the queue has drained and no job is running
+                if(jobs.empty())
+                    cond.notify_all();
+ }
+ }
+
+ public:
+ Queue() : mutex(), cond(), jobs(), executor_thread(), quit(false), running_job(false)
+ {
+ executor_thread = std::thread(&Queue::thread_fn, this);
+ }
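+        // Destruction drains any jobs still queued before joining the executor thread.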
+ ~Queue()
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ quit = true;
+ cond.notify_all();
+ lock.unlock();
+ executor_thread.join();
+ }
+
+ private:
+ bool is_idle(std::unique_lock<std::mutex> &lock)
+ {
+ if(!jobs.empty())
+ return false;
+ if(running_job)
+ return false;
+ return true;
+ }
+
+ public:
+ bool is_idle()
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ return is_idle(lock);
+ }
+ void wait_idle()
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ while(!is_idle(lock))
+ cond.wait(lock);
+ }
+ void queue_job(std::unique_ptr<Job> job)
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ while(jobs.full())
+ cond.wait(lock);
+ bool was_idle = is_idle(lock);
+ jobs.push_back(std::move(job));
+ if(was_idle)
+ cond.notify_all();
+ }
+ void queue_fence_signal(Vulkan_fence &fence)
+ {
+ struct Signal_fence_job final : public Job
+ {
+ Vulkan_fence &fence;
+ explicit Signal_fence_job(Vulkan_fence &fence) noexcept : fence(fence)
+ {
+ }
+ virtual void run() noexcept override
+ {
+ fence.signal();
+ }
+ };
+ queue_job(std::make_unique<Signal_fence_job>(fence));
+ }
+ };
+ Vulkan_instance &instance;
+ Vulkan_physical_device &physical_device;
+ VkPhysicalDeviceFeatures enabled_features;
+ static constexpr std::size_t queue_count = 1;
+ std::unique_ptr<Queue> queues[queue_count];
+ Supported_extensions extensions; // includes both device and instance extensions
+ explicit Vulkan_device(Vulkan_physical_device &physical_device,
+ const VkPhysicalDeviceFeatures &enabled_features,
+ const Supported_extensions &extensions) noexcept
+ : instance(physical_device.instance),
+ physical_device(physical_device),
+ enabled_features(enabled_features),
+ queues{},
+ extensions(extensions)
+ {
+ for(auto &queue : queues)
+ queue = std::make_unique<Queue>();
+ }
+ void wait_idle()
+ {
+ for(auto &queue : queues)
+ queue->wait_idle();
+ }
+ static util::variant<std::unique_ptr<Vulkan_device>, VkResult> create(
+ Vulkan_physical_device &physical_device, const VkDeviceCreateInfo &create_info);
+};
+
struct Vulkan_image_descriptor
{
static constexpr VkImageCreateFlags supported_flags =
assert(queue);
auto *device_pointer = vulkan::Vulkan_device::from_handle(device);
static_assert(vulkan::Vulkan_device::queue_count == 1, "");
- *queue = to_handle(&device_pointer->queues[0]);
+ *queue = to_handle(device_pointer->queues[0].get());
}
extern "C" VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue,
- uint32_t submitCount,
- const VkSubmitInfo *pSubmits,
+ uint32_t submit_count,
+ const VkSubmitInfo *submits,
VkFence fence)
{
-#warning finish implementing vkQueueSubmit
- assert(!"vkQueueSubmit is not implemented");
+ assert(queue);
+ assert(submit_count == 0 || submits);
+ return vulkan_icd::catch_exceptions_and_return_result(
+ [&]()
+ {
+ auto queue_pointer = vulkan::Vulkan_device::Queue::from_handle(queue);
+ for(std::size_t i = 0; i < submit_count; i++)
+ {
+ auto &submission = submits[i];
+ assert(submission.sType == VK_STRUCTURE_TYPE_SUBMIT_INFO);
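+                // Each VkSubmitInfo becomes one queued job: wait on its wait semaphores, run
+                // its command buffers in order, then signal its signal semaphores.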
+ struct Run_submission_job final : public vulkan::Vulkan_device::Job
+ {
+ std::vector<vulkan::Vulkan_semaphore *> wait_semaphores;
+ std::vector<vulkan::Vulkan_command_buffer *> command_buffers;
+ std::vector<vulkan::Vulkan_semaphore *> signal_semaphores;
+ Run_submission_job(
+ std::vector<vulkan::Vulkan_semaphore *> wait_semaphores,
+ std::vector<vulkan::Vulkan_command_buffer *> command_buffers,
+ std::vector<vulkan::Vulkan_semaphore *> signal_semaphores) noexcept
+ : wait_semaphores(std::move(wait_semaphores)),
+ command_buffers(std::move(command_buffers)),
+ signal_semaphores(std::move(signal_semaphores))
+ {
+ }
+ virtual void run() noexcept override
+ {
+ for(auto &i : wait_semaphores)
+ i->wait();
+ for(auto &i : command_buffers)
+ i->run();
+ for(auto &i : signal_semaphores)
+ i->signal();
+ }
+ };
+ std::vector<vulkan::Vulkan_semaphore *> wait_semaphores;
+ wait_semaphores.reserve(submission.waitSemaphoreCount);
+ for(std::uint32_t i = 0; i < submission.waitSemaphoreCount; i++)
+ {
+ assert(submission.pWaitSemaphores[i]);
+ wait_semaphores.push_back(
+ vulkan::Vulkan_semaphore::from_handle(submission.pWaitSemaphores[i]));
+ }
+ std::vector<vulkan::Vulkan_command_buffer *> command_buffers;
+ command_buffers.reserve(submission.commandBufferCount);
+ for(std::uint32_t i = 0; i < submission.commandBufferCount; i++)
+ {
+ assert(submission.pCommandBuffers[i]);
+ command_buffers.push_back(
+ vulkan::Vulkan_command_buffer::from_handle(submission.pCommandBuffers[i]));
+ }
+ std::vector<vulkan::Vulkan_semaphore *> signal_semaphores;
+ signal_semaphores.reserve(submission.signalSemaphoreCount);
+ for(std::uint32_t i = 0; i < submission.signalSemaphoreCount; i++)
+ {
+ assert(submission.pSignalSemaphores[i]);
+ signal_semaphores.push_back(
+ vulkan::Vulkan_semaphore::from_handle(submission.pSignalSemaphores[i]));
+ }
+ queue_pointer->queue_job(
+ std::make_unique<Run_submission_job>(std::move(wait_semaphores),
+ std::move(command_buffers),
+ std::move(signal_semaphores)));
+ }
+ if(fence)
+ queue_pointer->queue_fence_signal(*vulkan::Vulkan_fence::from_handle(fence));
+ return VK_SUCCESS;
+ });
}
extern "C" VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue)
{
-#warning finish implementing vkQueueWaitIdle
- assert(!"vkQueueWaitIdle is not implemented");
+ assert(queue);
+ return vulkan_icd::catch_exceptions_and_return_result(
+ [&]()
+ {
+ auto queue_pointer = vulkan::Vulkan_device::Queue::from_handle(queue);
+ queue_pointer->wait_idle();
+ return VK_SUCCESS;
+ });
}
extern "C" VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device)
{
+ assert(device);
return vulkan_icd::catch_exceptions_and_return_result(
[&]()
{
}
extern "C" VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue,
- const VkPresentInfoKHR *pPresentInfo)
+ const VkPresentInfoKHR *present_info)
{
-#warning finish implementing vkQueuePresentKHR
- assert(!"vkQueuePresentKHR is not implemented");
+ assert(queue);
+ assert(present_info);
+ assert(present_info->sType == VK_STRUCTURE_TYPE_PRESENT_INFO_KHR);
+ assert(present_info->waitSemaphoreCount == 0 || present_info->pWaitSemaphores);
+ assert(present_info->swapchainCount > 0);
+ assert(present_info->pSwapchains);
+ assert(present_info->pImageIndices);
+ return vulkan_icd::catch_exceptions_and_return_result(
+ [&]()
+ {
+ auto *queue_pointer = vulkan::Vulkan_device::Queue::from_handle(queue);
+ if(present_info->waitSemaphoreCount > 0)
+ {
+ std::vector<vulkan::Vulkan_semaphore *> semaphores;
+ semaphores.reserve(present_info->waitSemaphoreCount);
+ for(std::uint32_t i = 0; i < present_info->waitSemaphoreCount; i++)
+ {
+ assert(present_info->pWaitSemaphores[i]);
+ semaphores.push_back(
+ vulkan::Vulkan_semaphore::from_handle(present_info->pWaitSemaphores[i]));
+ }
+ struct Wait_for_semaphores_job final : public vulkan::Vulkan_device::Job
+ {
+ std::vector<vulkan::Vulkan_semaphore *> semaphores;
+ explicit Wait_for_semaphores_job(
+ std::vector<vulkan::Vulkan_semaphore *> semaphores) noexcept
+ : semaphores(std::move(semaphores))
+ {
+ }
+ virtual void run() noexcept override
+ {
+ for(auto &i : semaphores)
+ i->wait();
+ }
+ };
+ queue_pointer->queue_job(
+ std::make_unique<Wait_for_semaphores_job>(std::move(semaphores)));
+ }
+ VkResult retval = VK_SUCCESS;
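+            // Merge the per-swapchain results, keeping the most severe one:
+            // device lost > surface lost > out of date > suboptimal.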
+ for(std::uint32_t i = 0; i < present_info->swapchainCount; i++)
+ {
+ auto *swapchain =
+ vulkan_icd::Vulkan_swapchain::from_handle(present_info->pSwapchains[i]);
+ assert(swapchain);
+ VkResult present_result =
+ swapchain->queue_present(present_info->pImageIndices[i], *queue_pointer);
+ if(present_result == VK_ERROR_DEVICE_LOST || retval == VK_ERROR_DEVICE_LOST)
+ retval = VK_ERROR_DEVICE_LOST;
+ else if(present_result == VK_ERROR_SURFACE_LOST_KHR
+ || retval == VK_ERROR_SURFACE_LOST_KHR)
+ retval = VK_ERROR_SURFACE_LOST_KHR;
+ else if(present_result == VK_ERROR_OUT_OF_DATE_KHR
+ || retval == VK_ERROR_OUT_OF_DATE_KHR)
+ retval = VK_ERROR_OUT_OF_DATE_KHR;
+ else if(present_result == VK_SUBOPTIMAL_KHR || retval == VK_SUBOPTIMAL_KHR)
+ retval = VK_SUBOPTIMAL_KHR;
+ if(present_info->pResults)
+ present_info->pResults[i] = present_result;
+ }
+ return retval;
+ });
}
namespace kazan
vulkan::Vulkan_semaphore *semaphore,
vulkan::Vulkan_fence *fence,
std::uint32_t &returned_image_index) = 0;
+    virtual VkResult queue_present(std::uint32_t image_index,
+                                   vulkan::Vulkan_device::Queue &queue) = 0;
};
struct Wsi
util::Static_circular_deque<std::size_t, max_swapchain_image_count> presenting_image_queue;
std::uint32_t swapchain_width;
std::uint32_t swapchain_height;
+ Gc gc;
+ unsigned window_depth;
explicit Swapchain(Start_setup_results start_setup_results,
xcb_connection_t *connection,
xcb_window_t window,
window(window),
shm_is_supported(start_setup_results.shm_is_supported),
status(Status::Good),
- presenting_image_queue()
+ presenting_image_queue(),
+ gc(std::move(start_setup_results.gc)),
+ window_depth(start_setup_results.window_depth)
{
assert(create_info.sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
#warning formats other than VK_FORMAT_B8G8R8A8_UNORM are unimplemented
return VK_SUCCESS;
}
}
+ virtual VkResult queue_present(std::uint32_t image_index,
+ vulkan::Vulkan_device::Queue &queue) override
+ {
+ assert(image_index < images.size());
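+        // A swapchain whose surface setup failed or that has gone out of date reports the
+        // corresponding error without presenting anything.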
+ switch(status)
+ {
+ case Status::No_surface:
+ case Status::Setup_failed:
+ return VK_ERROR_SURFACE_LOST_KHR;
+ case Status::Out_of_date:
+ return VK_ERROR_OUT_OF_DATE_KHR;
+ case Status::Good:
+ break;
+ }
+ auto &image = get_image(image_index);
+ assert(image.owner == Image_owner::Application);
+        // wait for previously submitted rendering to finish by queuing a fence-signal
+        // job behind it and blocking on the fence
+ {
+ vulkan::Vulkan_fence fence(0);
+ queue.queue_fence_signal(fence);
+ fence.wait(-1);
+ }
+
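+        // With MIT-SHM the image already has a server-side pixmap, so a copy request is
+        // enough; otherwise the pixel data is pushed over the connection with xcb_put_image.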
+ if(shm_is_supported)
+ {
+ xcb_copy_area(connection,
+ image.pixmap.get(),
+ window,
+ gc.get(),
+ 0,
+ 0,
+ 0,
+ 0,
+ swapchain_width,
+ swapchain_height);
+ }
+ else
+ {
+ std::size_t image_size = image.descriptor.get_memory_size();
+ assert(static_cast<std::uint32_t>(image_size) == image_size);
+ xcb_put_image(connection,
+ XCB_IMAGE_FORMAT_Z_PIXMAP,
+ window,
+ gc.get(),
+ swapchain_width,
+ swapchain_height,
+ 0,
+ 0,
+ 0,
+ window_depth,
+ image_size,
+ static_cast<const std::uint8_t *>(image.memory.get()));
+ }
+ image.get_geometry_cookie = xcb_get_geometry(connection, window);
+ image.owner = Image_owner::Presentation_engine;
+ presenting_image_queue.push_back(image_index);
+ xcb_flush(connection);
+ return VK_SUCCESS;
+ }
};
};