anv: Get rid of the ANV_CALL macro
mesa.git: src/intel/vulkan/anv_dump.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "util/list.h"
#include "util/ralloc.h"

/* This file contains utility functions to help with debugging. They can be
 * called from GDB or similar to help inspect images and buffers.
 *
 * To dump the framebuffers of an application after each render pass, all you
 * have to do is the following:
 *
 * 1) Start the application in GDB
 * 2) Run until you get to the point where the rendering errors occur
 * 3) Pause in GDB and set a breakpoint in anv_QueuePresentKHR
 * 4) Continue until it reaches anv_QueuePresentKHR
 * 5) Call anv_dump_start(queue->device, ANV_DUMP_FRAMEBUFFERS_BIT)
 * 6) Continue until the next anv_QueuePresentKHR call
 * 7) Call anv_dump_finish() to complete the dump and write files
 *
 * An example GDB session following these steps is sketched in the comment
 * below.
 *
 * While it's a bit manual, the process does allow you to do some very
 * valuable debugging by dumping every render target at the end of every
 * render pass. It's worth noting that this assumes that the application
 * creates all of the command buffers more-or-less in-order and between the
 * two anv_QueuePresentKHR calls.
 */
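
/* As a rough illustration of the steps above, a GDB session might look like
 * this (the commands are illustrative; the `queue->device` expression assumes
 * you are stopped inside anv_QueuePresentKHR where a `queue` variable is in
 * scope, as described above):
 *
 *    (gdb) run
 *    ... reproduce the rendering problem, then interrupt with Ctrl-C ...
 *    (gdb) break anv_QueuePresentKHR
 *    (gdb) continue
 *    ... breakpoint hit ...
 *    (gdb) call anv_dump_start(queue->device, ANV_DUMP_FRAMEBUFFERS_BIT)
 *    (gdb) continue
 *    ... breakpoint hit again on the next present ...
 *    (gdb) call anv_dump_finish()
 *
 * The resulting framebuffer*.ppm files are written relative to the
 * application's current working directory (see anv_dump_add_framebuffer()
 * and dump_image_write_to_ppm() below).
 */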
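/* One queued dump: a linear copy of a single subresource, together with the
 * memory it is bound to and the name of the PPM file it will be written to.
 */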
struct dump_image {
   struct list_head link;

   const char *filename;

   VkExtent2D extent;
   VkImage image;
   VkDeviceMemory memory;
};

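/* Creates a linear VK_FORMAT_R8G8B8A8_UNORM image of the requested size,
 * allocates memory for it and binds the two together. This is the
 * CPU-readable destination that dump_image_do_blit() copies into.
 */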
static void
dump_image_init(struct anv_device *device, struct dump_image *image,
                uint32_t width, uint32_t height, const char *filename)
{
   VkDevice vk_device = anv_device_to_handle(device);
   MAYBE_UNUSED VkResult result;

   image->filename = filename;
   image->extent = (VkExtent2D) { width, height };

   result = anv_CreateImage(vk_device,
      &(VkImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         .imageType = VK_IMAGE_TYPE_2D,
         .format = VK_FORMAT_R8G8B8A8_UNORM,
         .extent = (VkExtent3D) { width, height, 1 },
         .mipLevels = 1,
         .arrayLayers = 1,
         .samples = 1,
         .tiling = VK_IMAGE_TILING_LINEAR,
         .usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT,
         .flags = 0,
      }, NULL, &image->image);
   assert(result == VK_SUCCESS);

   VkMemoryRequirements reqs;
   anv_GetImageMemoryRequirements(vk_device, image->image, &reqs);

   result = anv_AllocateMemory(vk_device,
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = reqs.size,
         .memoryTypeIndex = 0,
      }, NULL, &image->memory);
   assert(result == VK_SUCCESS);

   result = anv_BindImageMemory(vk_device, image->image, image->memory, 0);
   assert(result == VK_SUCCESS);
}

static void
dump_image_finish(struct anv_device *device, struct dump_image *image)
{
   VkDevice vk_device = anv_device_to_handle(device);

   anv_DestroyImage(vk_device, image->image, NULL);
   anv_FreeMemory(vk_device, image->memory, NULL);
}

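/* Records a blit of one miplevel/layer/aspect of 'src' into the linear dump
 * image. Barriers are emitted around the blit so that any prior writes to
 * the source are visible to the blit and so that the result is visible to
 * host reads once the command buffer has executed.
 */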
static void
dump_image_do_blit(struct anv_device *device, struct dump_image *image,
                   struct anv_cmd_buffer *cmd_buffer, struct anv_image *src,
                   VkImageAspectFlagBits aspect,
                   unsigned miplevel, unsigned array_layer)
{
   PFN_vkCmdPipelineBarrier CmdPipelineBarrier =
      (void *)anv_GetDeviceProcAddr(anv_device_to_handle(device),
                                    "vkCmdPipelineBarrier");

   CmdPipelineBarrier(anv_cmd_buffer_to_handle(cmd_buffer),
                      VK_PIPELINE_STAGE_TRANSFER_BIT,
                      VK_PIPELINE_STAGE_TRANSFER_BIT,
                      0, 0, NULL, 0, NULL, 1,
      &(VkImageMemoryBarrier) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
         .srcAccessMask = ~0,
         .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
         .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
         .newLayout = VK_IMAGE_LAYOUT_GENERAL,
         .srcQueueFamilyIndex = 0,
         .dstQueueFamilyIndex = 0,
         .image = anv_image_to_handle(src),
         .subresourceRange = (VkImageSubresourceRange) {
            .aspectMask = aspect,
            .baseMipLevel = miplevel,
            .levelCount = 1,
            .baseArrayLayer = array_layer,
            .layerCount = 1,
         },
      });

   /* We need to do a blit so the image needs to be declared as sampled. The
    * only thing these are used for is making sure we create the correct
    * views, so it should be fine to just stomp it and set it back.
    */
   VkImageUsageFlags old_usage = src->usage;
   src->usage |= VK_IMAGE_USAGE_SAMPLED_BIT;

   anv_CmdBlitImage(anv_cmd_buffer_to_handle(cmd_buffer),
                    anv_image_to_handle(src), VK_IMAGE_LAYOUT_GENERAL,
                    image->image, VK_IMAGE_LAYOUT_GENERAL, 1,
      &(VkImageBlit) {
         .srcSubresource = {
            .aspectMask = aspect,
            .mipLevel = miplevel,
            .baseArrayLayer = array_layer,
            .layerCount = 1,
         },
         .srcOffsets = {
            { 0, 0, 0 },
            { image->extent.width, image->extent.height, 1 },
         },
         .dstSubresource = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .mipLevel = 0,
            .baseArrayLayer = 0,
            .layerCount = 1,
         },
         .dstOffsets = {
            { 0, 0, 0 },
            { image->extent.width, image->extent.height, 1 },
         },
      }, VK_FILTER_NEAREST);

   src->usage = old_usage;

   CmdPipelineBarrier(anv_cmd_buffer_to_handle(cmd_buffer),
                      VK_PIPELINE_STAGE_TRANSFER_BIT,
                      VK_PIPELINE_STAGE_TRANSFER_BIT,
                      0, 0, NULL, 0, NULL, 1,
      &(VkImageMemoryBarrier) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
         .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
         .dstAccessMask = VK_ACCESS_HOST_READ_BIT,
         .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
         .newLayout = VK_IMAGE_LAYOUT_GENERAL,
         .srcQueueFamilyIndex = 0,
         .dstQueueFamilyIndex = 0,
         .image = image->image,
         .subresourceRange = (VkImageSubresourceRange) {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
         },
      });
}

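/* Maps the dump image's memory and writes it out as a binary PPM (P6) file,
 * dropping the alpha channel. Assumes the copy into the dump image has
 * already completed on the GPU.
 */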
static void
dump_image_write_to_ppm(struct anv_device *device, struct dump_image *image)
{
   VkDevice vk_device = anv_device_to_handle(device);
   MAYBE_UNUSED VkResult result;

   VkMemoryRequirements reqs;
   anv_GetImageMemoryRequirements(vk_device, image->image, &reqs);

   uint8_t *map;
   result = anv_MapMemory(vk_device, image->memory, 0, reqs.size, 0, (void **)&map);
   assert(result == VK_SUCCESS);

   VkSubresourceLayout layout;
   anv_GetImageSubresourceLayout(vk_device, image->image,
      &(VkImageSubresource) {
         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
         .mipLevel = 0,
         .arrayLayer = 0,
      }, &layout);

   map += layout.offset;

   FILE *file = fopen(image->filename, "wb");
   assert(file);

   uint8_t *row = malloc(image->extent.width * 3);
   assert(row);

   fprintf(file, "P6\n%d %d\n255\n", image->extent.width, image->extent.height);
   for (unsigned y = 0; y < image->extent.height; y++) {
      for (unsigned x = 0; x < image->extent.width; x++) {
         row[x * 3 + 0] = map[x * 4 + 0];
         row[x * 3 + 1] = map[x * 4 + 1];
         row[x * 3 + 2] = map[x * 4 + 2];
      }
      fwrite(row, 3, image->extent.width, file);

      map += layout.rowPitch;
   }
   free(row);
   fclose(file);

   anv_UnmapMemory(vk_device, image->memory);
}

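/* Synchronously dumps a single miplevel/layer/aspect of an image to a PPM
 * file. This creates its own command pool, command buffer and fence, submits
 * the blit, waits for it to complete and then writes the file, so it is
 * suitable for calling directly from GDB.
 */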
void
anv_dump_image_to_ppm(struct anv_device *device,
                      struct anv_image *image, unsigned miplevel,
                      unsigned array_layer, VkImageAspectFlagBits aspect,
                      const char *filename)
{
   VkDevice vk_device = anv_device_to_handle(device);
   MAYBE_UNUSED VkResult result;

   const uint32_t width = anv_minify(image->extent.width, miplevel);
   const uint32_t height = anv_minify(image->extent.height, miplevel);

   struct dump_image dump;
   dump_image_init(device, &dump, width, height, filename);

   VkCommandPool commandPool;
   result = anv_CreateCommandPool(vk_device,
      &(VkCommandPoolCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
         .queueFamilyIndex = 0,
         .flags = 0,
      }, NULL, &commandPool);
   assert(result == VK_SUCCESS);

   VkCommandBuffer cmd;
   result = anv_AllocateCommandBuffers(vk_device,
      &(VkCommandBufferAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
         .commandPool = commandPool,
         .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
         .commandBufferCount = 1,
      }, &cmd);
   assert(result == VK_SUCCESS);

   result = anv_BeginCommandBuffer(cmd,
      &(VkCommandBufferBeginInfo) {
         .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
         .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
      });
   assert(result == VK_SUCCESS);

   dump_image_do_blit(device, &dump, anv_cmd_buffer_from_handle(cmd), image,
                      aspect, miplevel, array_layer);

   result = anv_EndCommandBuffer(cmd);
   assert(result == VK_SUCCESS);

   VkFence fence;
   result = anv_CreateFence(vk_device,
      &(VkFenceCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
         .flags = 0,
      }, NULL, &fence);
   assert(result == VK_SUCCESS);

   result = anv_QueueSubmit(anv_queue_to_handle(&device->queue), 1,
      &(VkSubmitInfo) {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
         .commandBufferCount = 1,
         .pCommandBuffers = &cmd,
      }, fence);
   assert(result == VK_SUCCESS);

   result = anv_WaitForFences(vk_device, 1, &fence, true, UINT64_MAX);
   assert(result == VK_SUCCESS);

   anv_DestroyFence(vk_device, fence, NULL);
   anv_DestroyCommandPool(vk_device, commandPool, NULL);

   dump_image_write_to_ppm(device, &dump);
   dump_image_finish(device, &dump);
}

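/* Global state for the anv_dump_start()/anv_dump_finish() mechanism, guarded
 * by dump_mutex.
 */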
static pthread_mutex_t dump_mutex = PTHREAD_MUTEX_INITIALIZER;

static enum anv_dump_action dump_actions = 0;

/* Used to prevent recursive dumping */
static enum anv_dump_action dump_old_actions;

struct list_head dump_list;
static void *dump_ctx;
static struct anv_device *dump_device;
static unsigned dump_count;

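/* Enables dumping for the given set of actions. Queued images are collected
 * on dump_list and are only written out when anv_dump_finish() is called.
 */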
void
anv_dump_start(struct anv_device *device, enum anv_dump_action actions)
{
   pthread_mutex_lock(&dump_mutex);

   dump_device = device;
   dump_actions = actions;
   list_inithead(&dump_list);
   dump_ctx = ralloc_context(NULL);
   dump_count = 0;

   pthread_mutex_unlock(&dump_mutex);
}

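/* Waits for the device to go idle, writes out every image queued since
 * anv_dump_start() and then tears the dump state back down.
 */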
void
anv_dump_finish()
{
   anv_DeviceWaitIdle(anv_device_to_handle(dump_device));

   pthread_mutex_lock(&dump_mutex);

   list_for_each_entry(struct dump_image, dump, &dump_list, link) {
      dump_image_write_to_ppm(dump_device, dump);
      dump_image_finish(dump_device, dump);
   }

   dump_actions = 0;
   dump_device = NULL;
   list_inithead(&dump_list);

   ralloc_free(dump_ctx);
   dump_ctx = NULL;

   pthread_mutex_unlock(&dump_mutex);
}

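/* Returns true and takes dump_mutex if 'action' is currently enabled. While
 * the lock is held, dump_actions is cleared so that the dump itself cannot
 * trigger a recursive dump; dump_unlock() restores it.
 */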
static bool
dump_lock(enum anv_dump_action action)
{
   if (likely((dump_actions & action) == 0))
      return false;

   pthread_mutex_lock(&dump_mutex);

   /* Prevent recursive dumping */
   dump_old_actions = dump_actions;
   dump_actions = 0;

   return true;
}

static void
dump_unlock()
{
   dump_actions = dump_old_actions;
   pthread_mutex_unlock(&dump_mutex);
}

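/* Queues a dump of one subresource of 'image': records a blit into a freshly
 * created linear image on the given command buffer and adds it to dump_list
 * to be written out later by anv_dump_finish().
 */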
static void
dump_add_image(struct anv_cmd_buffer *cmd_buffer, struct anv_image *image,
               VkImageAspectFlagBits aspect,
               unsigned miplevel, unsigned array_layer, const char *filename)
{
   const uint32_t width = anv_minify(image->extent.width, miplevel);
   const uint32_t height = anv_minify(image->extent.height, miplevel);

   struct dump_image *dump = ralloc(dump_ctx, struct dump_image);

   dump_image_init(cmd_buffer->device, dump, width, height, filename);
   dump_image_do_blit(cmd_buffer->device, dump, cmd_buffer, image,
                      aspect, miplevel, array_layer);

   list_addtail(&dump->link, &dump_list);
}

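/* Queues a dump of every aspect of every attachment of the given framebuffer
 * when ANV_DUMP_FRAMEBUFFERS_BIT is enabled. Files are named
 * framebuffer%04d-%d%c.ppm, where the fields are a per-render-pass counter,
 * the attachment index and the aspect ('c'olor, 'd'epth or 's'tencil).
 */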
void
anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                         struct anv_framebuffer *fb)
{
   if (!dump_lock(ANV_DUMP_FRAMEBUFFERS_BIT))
      return;

   unsigned dump_idx = dump_count++;

   for (unsigned i = 0; i < fb->attachment_count; i++) {
      struct anv_image_view *iview = fb->attachments[i];

      uint32_t b;
      for_each_bit(b, iview->image->aspects) {
         VkImageAspectFlagBits aspect = (1 << b);
         char suffix;
         switch (aspect) {
         case VK_IMAGE_ASPECT_COLOR_BIT:   suffix = 'c'; break;
         case VK_IMAGE_ASPECT_DEPTH_BIT:   suffix = 'd'; break;
         case VK_IMAGE_ASPECT_STENCIL_BIT: suffix = 's'; break;
         default:
            unreachable("Invalid aspect");
         }

         char *filename = ralloc_asprintf(dump_ctx, "framebuffer%04d-%d%c.ppm",
                                          dump_idx, i, suffix);

         dump_add_image(cmd_buffer, (struct anv_image *)iview->image, aspect,
                        iview->isl.base_level, iview->isl.base_array_layer,
                        filename);
      }
   }

   dump_unlock();
}