From ac7eeebce4ae9107863623321b74b1c08389f180 Mon Sep 17 00:00:00 2001
From: Jason Ekstrand
Date: Wed, 15 Jun 2016 16:13:46 -0700
Subject: [PATCH] anv/dump: Add support for dumping framebuffers

Reviewed-by: Chad Versace
---
 src/intel/vulkan/anv_dump.c        | 144 +++++++++++++++++++++++++++++
 src/intel/vulkan/anv_private.h     |  10 ++
 src/intel/vulkan/genX_cmd_buffer.c |   4 +
 3 files changed, 158 insertions(+)

diff --git a/src/intel/vulkan/anv_dump.c b/src/intel/vulkan/anv_dump.c
index 59a6f2af690..a84bcc9b35e 100644
--- a/src/intel/vulkan/anv_dump.c
+++ b/src/intel/vulkan/anv_dump.c
@@ -23,11 +23,33 @@
 
 #include "anv_private.h"
 
+#include "util/list.h"
+#include "util/ralloc.h"
+
 /* This file contains utility functions for help debugging. They can be
  * called from GDB or similar to help inspect images and buffers.
+ *
+ * To dump the framebuffers of an application after each render pass, all you
+ * have to do is the following
+ *
+ *  1) Start the application in GDB
+ *  2) Run until you get to the point where the rendering errors occur
+ *  3) Pause in GDB and set a breakpoint in anv_QueuePresentKHR
+ *  4) Continue until it reaches anv_QueuePresentKHR
+ *  5) Call anv_dump_start(queue->device, ANV_DUMP_FRAMEBUFFERS_BIT)
+ *  6) Continue until the next anv_QueuePresentKHR call
+ *  7) Call anv_dump_finish() to complete the dump and write files
+ *
+ * While it's a bit manual, the process does allow you to do some very
+ * valuable debugging by dumping every render target at the end of every
+ * render pass. It's worth noting that this assumes that the application
+ * creates all of the command buffers more-or-less in-order and between the
+ * two anv_QueuePresentKHR calls.
  */
 
 struct dump_image {
+   struct list_head link;
+
    const char *filename;
 
    VkExtent2D extent;
@@ -288,3 +310,125 @@ anv_dump_image_to_ppm(struct anv_device *device,
    dump_image_write_to_ppm(device, &dump);
    dump_image_finish(device, &dump);
 }
+
+static pthread_mutex_t dump_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static enum anv_dump_action dump_actions = 0;
+
+/* Used to prevent recursive dumping */
+static enum anv_dump_action dump_old_actions;
+
+struct list_head dump_list;
+static void *dump_ctx;
+static struct anv_device *dump_device;
+static unsigned dump_count;
+
+void
+anv_dump_start(struct anv_device *device, enum anv_dump_action actions)
+{
+   pthread_mutex_lock(&dump_mutex);
+
+   dump_device = device;
+   dump_actions = actions;
+   list_inithead(&dump_list);
+   dump_ctx = ralloc_context(NULL);
+   dump_count = 0;
+
+   pthread_mutex_unlock(&dump_mutex);
+}
+
+void
+anv_dump_finish()
+{
+   anv_DeviceWaitIdle(anv_device_to_handle(dump_device));
+
+   pthread_mutex_lock(&dump_mutex);
+
+   list_for_each_entry(struct dump_image, dump, &dump_list, link) {
+      dump_image_write_to_ppm(dump_device, dump);
+      dump_image_finish(dump_device, dump);
+   }
+
+   dump_actions = 0;
+   dump_device = NULL;
+   list_inithead(&dump_list);
+
+   ralloc_free(dump_ctx);
+   dump_ctx = NULL;
+
+   pthread_mutex_unlock(&dump_mutex);
+}
+
+static bool
+dump_lock(enum anv_dump_action action)
+{
+   if (likely((dump_actions & action) == 0))
+      return false;
+
+   pthread_mutex_lock(&dump_mutex);
+
+   /* Prevent recursive dumping */
+   dump_old_actions = dump_actions;
+   dump_actions = 0;
+
+   return true;
+}
+
+static void
+dump_unlock()
+{
+   dump_actions = dump_old_actions;
+   pthread_mutex_unlock(&dump_mutex);
+}
+
+static void
+dump_add_image(struct anv_cmd_buffer *cmd_buffer, struct anv_image *image,
+               VkImageAspectFlagBits aspect,
+               unsigned miplevel, unsigned array_layer, const char *filename)
+{
+   const uint32_t width = anv_minify(image->extent.width, miplevel);
+   const uint32_t height = anv_minify(image->extent.height, miplevel);
+
+   struct dump_image *dump = ralloc(dump_ctx, struct dump_image);
+
+   dump_image_init(cmd_buffer->device, dump, width, height, filename);
+   dump_image_do_blit(cmd_buffer->device, dump, cmd_buffer, image,
+                      aspect, miplevel, array_layer);
+
+   list_addtail(&dump->link, &dump_list);
+}
+
+void
+anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
+                         struct anv_framebuffer *fb)
+{
+   if (!dump_lock(ANV_DUMP_FRAMEBUFFERS_BIT))
+      return;
+
+   unsigned dump_idx = dump_count++;
+
+   for (unsigned i = 0; i < fb->attachment_count; i++) {
+      struct anv_image_view *iview = fb->attachments[i];
+
+      uint32_t b;
+      for_each_bit(b, iview->image->aspects) {
+         VkImageAspectFlagBits aspect = (1 << b);
+         char suffix;
+         switch (aspect) {
+         case VK_IMAGE_ASPECT_COLOR_BIT:   suffix = 'c'; break;
+         case VK_IMAGE_ASPECT_DEPTH_BIT:   suffix = 'd'; break;
+         case VK_IMAGE_ASPECT_STENCIL_BIT: suffix = 's'; break;
+         default:
+            unreachable("Invalid aspect");
+         }
+
+         char *filename = ralloc_asprintf(dump_ctx, "framebuffer%04d-%d%c.ppm",
+                                          dump_idx, i, suffix);
+
+         dump_add_image(cmd_buffer, (struct anv_image *)iview->image, aspect,
+                        iview->base_mip, iview->base_layer, filename);
+      }
+   }
+
+   dump_unlock();
+}
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 673b4ed061c..7b2d1dd15b8 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -1848,6 +1848,16 @@ void anv_dump_image_to_ppm(struct anv_device *device,
                            unsigned array_layer, VkImageAspectFlagBits aspect,
                            const char *filename);
 
+enum anv_dump_action {
+   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
+};
+
+void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
+void anv_dump_finish(void);
+
+void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
+                              struct anv_framebuffer *fb);
+
 #define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)                     \
                                                                           \
 static inline struct __anv_type *                                         \
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index 741d5bfd602..ed90a91499f 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -1191,6 +1191,10 @@ void genX(CmdEndRenderPass)(
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 
    anv_cmd_buffer_resolve_subpass(cmd_buffer);
+
+#ifndef NDEBUG
+   anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
+#endif
 }
 
 static void
-- 
2.30.2
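
A possible GDB session for the workflow described in the new comment block, shown as a sketch rather than part of the patch. It assumes a build with NDEBUG undefined (otherwise the genX(CmdEndRenderPass) hook above is compiled out) and that driver debug symbols are loaded so that `queue` is visible inside anv_QueuePresentKHR:

   (gdb) break anv_QueuePresentKHR
   (gdb) continue
   # first hit of the breakpoint: arm the dump for the render passes
   # recorded between this present and the next one
   (gdb) call anv_dump_start(queue->device, ANV_DUMP_FRAMEBUFFERS_BIT)
   (gdb) continue
   # second hit: write out everything that was captured
   (gdb) call anv_dump_finish()

If the enumerator is not visible to GDB, passing 0x1 is equivalent, since ANV_DUMP_FRAMEBUFFERS_BIT is defined as 0x1. Per the "framebuffer%04d-%d%c.ppm" format in anv_dump_add_framebuffer, the first number in each output filename is the render pass index since anv_dump_start, the second is the attachment index, and the suffix is 'c', 'd', or 's' for the color, depth, or stencil aspect; the files are presumably written relative to the application's working directory.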