radeonsi: dump shader binary buffer contents
[mesa.git] / src / gallium / drivers / radeonsi / si_debug.c
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_pipe.h"
26 #include "si_compute.h"
27 #include "sid.h"
28 #include "sid_tables.h"
29 #include "driver_ddebug/dd_util.h"
30 #include "util/u_dump.h"
31 #include "util/u_log.h"
32 #include "util/u_memory.h"
33 #include "util/u_string.h"
34 #include "ac_debug.h"
35
36 static void si_dump_bo_list(struct si_context *sctx,
37 const struct radeon_saved_cs *saved, FILE *f);
38
39 DEBUG_GET_ONCE_OPTION(replace_shaders, "RADEON_REPLACE_SHADERS", NULL)
40
/**
 * Store a linearized copy of all chunks of \p cs together with the buffer
 * list in \p saved.
 *
 * On allocation failure, \p saved is zeroed so it is safe to pass to
 * si_clear_saved_cs() later.
 */
void si_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
		struct radeon_saved_cs *saved, bool get_buffer_list)
{
	uint32_t *buf;
	unsigned i;

	/* Save the IB chunks. The total size is the sum of all finished
	 * chunks plus the chunk currently being built. */
	saved->num_dw = cs->prev_dw + cs->current.cdw;
	saved->ib = MALLOC(4 * saved->num_dw); /* dwords are 4 bytes each */
	if (!saved->ib)
		goto oom;

	/* Copy the finished chunks first, then the current (open) one. */
	buf = saved->ib;
	for (i = 0; i < cs->num_prev; ++i) {
		memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
		buf += cs->prev[i].cdw;
	}
	memcpy(buf, cs->current.buf, cs->current.cdw * 4);

	if (!get_buffer_list)
		return;

	/* Save the buffer list: first query the count, then fill the array. */
	saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
	saved->bo_list = CALLOC(saved->bo_count,
				sizeof(saved->bo_list[0]));
	if (!saved->bo_list) {
		FREE(saved->ib);
		goto oom;
	}
	ws->cs_get_buffer_list(cs, saved->bo_list);

	return;

oom:
	/* Leave *saved in a consistent, empty state on allocation failure. */
	fprintf(stderr, "%s: out of memory\n", __func__);
	memset(saved, 0, sizeof(*saved));
}
83
84 void si_clear_saved_cs(struct radeon_saved_cs *saved)
85 {
86 FREE(saved->ib);
87 FREE(saved->bo_list);
88
89 memset(saved, 0, sizeof(*saved));
90 }
91
/* Destroy a si_saved_cs: drop the saved gfx IB copy, release the trace
 * buffer reference, and free the container itself. */
void si_destroy_saved_cs(struct si_saved_cs *scs)
{
	si_clear_saved_cs(&scs->gfx);
	si_resource_reference(&scs->trace_buf, NULL);
	free(scs);
}
98
99 static void si_dump_shader(struct si_screen *sscreen,
100 enum pipe_shader_type processor,
101 const struct si_shader *shader, FILE *f)
102 {
103 if (shader->shader_log)
104 fwrite(shader->shader_log, shader->shader_log_size, 1, f);
105 else
106 si_shader_dump(sscreen, shader, NULL, processor, f, false);
107
108 if (shader->bo && sscreen->options.dump_shader_binary) {
109 unsigned size = shader->bo->b.b.width0;
110 fprintf(f, "BO: VA=%"PRIx64" Size=%u\n", shader->bo->gpu_address, size);
111
112 const char *mapped = sscreen->ws->buffer_map(shader->bo->buf, NULL,
113 PIPE_TRANSFER_UNSYNCHRONIZED |
114 PIPE_TRANSFER_READ |
115 RADEON_TRANSFER_TEMPORARY);
116
117 for (unsigned i = 0; i < size; i += 4) {
118 fprintf(f, " %4x: %08x\n", i, *(uint32_t*)(mapped + i));
119 }
120
121 sscreen->ws->buffer_unmap(shader->bo->buf);
122
123 fprintf(f, "\n");
124 }
125 }
126
/* u_log chunk that prints a shader's disassembly/log when flushed. */
struct si_log_chunk_shader {
	/* The shader destroy code assumes a current context for unlinking of
	 * PM4 packets etc.
	 *
	 * While we should be able to destroy shaders without a context, doing
	 * so would happen only very rarely and be therefore likely to fail
	 * just when you're trying to debug something. Let's just remember the
	 * current context in the chunk.
	 */
	struct si_context *ctx;
	struct si_shader *shader;
	enum pipe_shader_type processor;

	/* For keep-alive reference counts */
	struct si_shader_selector *sel;	/* set for gfx shaders */
	struct si_compute *program;	/* set for compute shaders */
};

/* Drop the keep-alive references held by the chunk and free it. */
static void
si_log_chunk_shader_destroy(void *data)
{
	struct si_log_chunk_shader *chunk = data;
	si_shader_selector_reference(chunk->ctx, &chunk->sel, NULL);
	si_compute_reference(&chunk->program, NULL);
	FREE(chunk);
}

/* Print the recorded shader via si_dump_shader. */
static void
si_log_chunk_shader_print(void *data, FILE *f)
{
	struct si_log_chunk_shader *chunk = data;
	struct si_screen *sscreen = chunk->ctx->screen;
	si_dump_shader(sscreen, chunk->processor,
		       chunk->shader, f);
}
162
163 static struct u_log_chunk_type si_log_chunk_type_shader = {
164 .destroy = si_log_chunk_shader_destroy,
165 .print = si_log_chunk_shader_print,
166 };
167
168 static void si_dump_gfx_shader(struct si_context *ctx,
169 const struct si_shader_ctx_state *state,
170 struct u_log_context *log)
171 {
172 struct si_shader *current = state->current;
173
174 if (!state->cso || !current)
175 return;
176
177 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
178 chunk->ctx = ctx;
179 chunk->processor = state->cso->info.processor;
180 chunk->shader = current;
181 si_shader_selector_reference(ctx, &chunk->sel, current->selector);
182 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
183 }
184
185 static void si_dump_compute_shader(struct si_context *ctx,
186 struct u_log_context *log)
187 {
188 const struct si_cs_shader_state *state = &ctx->cs_shader_state;
189
190 if (!state->program)
191 return;
192
193 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
194 chunk->ctx = ctx;
195 chunk->processor = PIPE_SHADER_COMPUTE;
196 chunk->shader = &state->program->shader;
197 si_compute_reference(&chunk->program, state->program);
198 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
199 }
200
/**
 * Shader compiles can be overridden with arbitrary ELF objects by setting
 * the environment variable RADEON_REPLACE_SHADERS=num1:filename1[;num2:filename2]
 *
 * \param num     sequence number of the compiled shader
 * \param binary  receives the parsed ELF contents when a replacement exists
 * \return true if the shader was replaced from a file
 */
bool si_replace_shader(unsigned num, struct ac_shader_binary *binary)
{
	const char *p = debug_get_option_replace_shaders();
	const char *semicolon;
	char *copy = NULL;
	FILE *f;
	long filesize, nread;
	char *buf = NULL;
	bool replaced = false;

	if (!p)
		return false;

	/* Scan the ';'-separated "num:filename" entries for one whose
	 * number matches \p num. Malformed entries abort the process. */
	while (*p) {
		unsigned long i;
		char *endp;
		i = strtoul(p, &endp, 0);

		p = endp;
		if (*p != ':') {
			fprintf(stderr, "RADEON_REPLACE_SHADERS formatted badly.\n");
			exit(1);
		}
		++p;

		if (i == num)
			break;

		/* Not this entry: skip past the next ';'. */
		p = strchr(p, ';');
		if (!p)
			return false;
		++p;
	}
	if (!*p)
		return false;

	/* p now points at the filename; cut it at the next ';' if any. */
	semicolon = strchr(p, ';');
	if (semicolon) {
		p = copy = strndup(p, semicolon - p);
		if (!copy) {
			fprintf(stderr, "out of memory\n");
			return false;
		}
	}

	fprintf(stderr, "radeonsi: replace shader %u by %s\n", num, p);

	f = fopen(p, "r");
	if (!f) {
		perror("radeonsi: failed to open file");
		goto out_free;
	}

	/* Determine the file size by seeking to the end. */
	if (fseek(f, 0, SEEK_END) != 0)
		goto file_error;

	filesize = ftell(f);
	if (filesize < 0)
		goto file_error;

	if (fseek(f, 0, SEEK_SET) != 0)
		goto file_error;

	buf = MALLOC(filesize);
	if (!buf) {
		fprintf(stderr, "out of memory\n");
		goto out_close;
	}

	nread = fread(buf, 1, filesize, f);
	if (nread != filesize)
		goto file_error;

	/* Parse the ELF object into the output binary. */
	ac_elf_read(buf, filesize, binary);
	replaced = true;

out_close:
	fclose(f);
out_free:
	/* FREE() and free() both accept NULL. */
	FREE(buf);
	free(copy);
	return replaced;

file_error:
	perror("radeonsi: reading shader");
	goto out_close;
}
292
293 /* Parsed IBs are difficult to read without colors. Use "less -R file" to
294 * read them, or use "aha -b -f file" to convert them to html.
295 */
296 #define COLOR_RESET "\033[0m"
297 #define COLOR_RED "\033[31m"
298 #define COLOR_GREEN "\033[1;32m"
299 #define COLOR_YELLOW "\033[1;33m"
300 #define COLOR_CYAN "\033[1;36m"
301
302 static void si_dump_mmapped_reg(struct si_context *sctx, FILE *f,
303 unsigned offset)
304 {
305 struct radeon_winsys *ws = sctx->ws;
306 uint32_t value;
307
308 if (ws->read_registers(ws, offset, 1, &value))
309 ac_dump_reg(f, sctx->chip_class, offset, value, ~0);
310 }
311
312 static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
313 {
314 if (!sctx->screen->info.has_read_registers_query)
315 return;
316
317 fprintf(f, "Memory-mapped registers:\n");
318 si_dump_mmapped_reg(sctx, f, R_008010_GRBM_STATUS);
319
320 /* No other registers can be read on DRM < 3.1.0. */
321 if (sctx->screen->info.drm_major < 3 ||
322 sctx->screen->info.drm_minor < 1) {
323 fprintf(f, "\n");
324 return;
325 }
326
327 si_dump_mmapped_reg(sctx, f, R_008008_GRBM_STATUS2);
328 si_dump_mmapped_reg(sctx, f, R_008014_GRBM_STATUS_SE0);
329 si_dump_mmapped_reg(sctx, f, R_008018_GRBM_STATUS_SE1);
330 si_dump_mmapped_reg(sctx, f, R_008038_GRBM_STATUS_SE2);
331 si_dump_mmapped_reg(sctx, f, R_00803C_GRBM_STATUS_SE3);
332 si_dump_mmapped_reg(sctx, f, R_00D034_SDMA0_STATUS_REG);
333 si_dump_mmapped_reg(sctx, f, R_00D834_SDMA1_STATUS_REG);
334 if (sctx->chip_class <= GFX8) {
335 si_dump_mmapped_reg(sctx, f, R_000E50_SRBM_STATUS);
336 si_dump_mmapped_reg(sctx, f, R_000E4C_SRBM_STATUS2);
337 si_dump_mmapped_reg(sctx, f, R_000E54_SRBM_STATUS3);
338 }
339 si_dump_mmapped_reg(sctx, f, R_008680_CP_STAT);
340 si_dump_mmapped_reg(sctx, f, R_008674_CP_STALLED_STAT1);
341 si_dump_mmapped_reg(sctx, f, R_008678_CP_STALLED_STAT2);
342 si_dump_mmapped_reg(sctx, f, R_008670_CP_STALLED_STAT3);
343 si_dump_mmapped_reg(sctx, f, R_008210_CP_CPC_STATUS);
344 si_dump_mmapped_reg(sctx, f, R_008214_CP_CPC_BUSY_STAT);
345 si_dump_mmapped_reg(sctx, f, R_008218_CP_CPC_STALLED_STAT1);
346 si_dump_mmapped_reg(sctx, f, R_00821C_CP_CPF_STATUS);
347 si_dump_mmapped_reg(sctx, f, R_008220_CP_CPF_BUSY_STAT);
348 si_dump_mmapped_reg(sctx, f, R_008224_CP_CPF_STALLED_STAT1);
349 fprintf(f, "\n");
350 }
351
/* u_log chunk describing dword ranges of the gfx and/or compute IBs that
 * belong to one saved CS. */
struct si_log_chunk_cs {
	struct si_context *ctx;
	struct si_saved_cs *cs;		/* holds a keep-alive reference */
	bool dump_bo_list;		/* also print the buffer list */
	unsigned gfx_begin, gfx_end;	/* dword range in the gfx IB */
	unsigned compute_begin, compute_end; /* dword range in the compute IB */
};

/* Drop the saved-CS reference held by the chunk and free it. */
static void si_log_chunk_type_cs_destroy(void *data)
{
	struct si_log_chunk_cs *chunk = data;
	si_saved_cs_reference(&chunk->cs, NULL);
	free(chunk);
}
366
/* Parse and print an IB that has not been flushed yet, i.e. one whose
 * dwords are still split across the chunks of a radeon_cmdbuf.
 *
 * \p begin and \p end are dword offsets into the logical concatenation of
 * all chunks; the loop below translates them into per-chunk offsets.
 */
static void si_parse_current_ib(FILE *f, struct radeon_cmdbuf *cs,
				unsigned begin, unsigned end,
				int *last_trace_id, unsigned trace_id_count,
				const char *name, enum chip_class chip_class)
{
	unsigned orig_end = end;

	assert(begin <= end);

	fprintf(f, "------------------ %s begin (dw = %u) ------------------\n",
		name, begin);

	for (unsigned prev_idx = 0; prev_idx < cs->num_prev; ++prev_idx) {
		struct radeon_cmdbuf_chunk *chunk = &cs->prev[prev_idx];

		/* Print the part of [begin, end) that overlaps this chunk. */
		if (begin < chunk->cdw) {
			ac_parse_ib_chunk(f, chunk->buf + begin,
					  MIN2(end, chunk->cdw) - begin,
					  last_trace_id, trace_id_count,
					  chip_class, NULL, NULL);
		}

		if (end <= chunk->cdw)
			return;

		if (begin < chunk->cdw)
			fprintf(f, "\n---------- Next %s Chunk ----------\n\n",
				name);

		/* Rebase the offsets relative to the next chunk. */
		begin -= MIN2(begin, chunk->cdw);
		end -= chunk->cdw;
	}

	assert(end <= cs->current.cdw);

	/* Whatever remains lives in the chunk currently being built. */
	ac_parse_ib_chunk(f, cs->current.buf + begin, end - begin, last_trace_id,
			  trace_id_count, chip_class, NULL, NULL);

	fprintf(f, "------------------- %s end (dw = %u) -------------------\n\n",
		name, orig_end);
}
408
/* Print a CS log chunk: parse the recorded gfx/compute IB ranges with
 * trace-ID annotations and, if requested, dump the buffer list. */
static void si_log_chunk_type_cs_print(void *data, FILE *f)
{
	struct si_log_chunk_cs *chunk = data;
	struct si_context *ctx = chunk->ctx;
	struct si_saved_cs *scs = chunk->cs;
	int last_trace_id = -1;
	int last_compute_trace_id = -1;

	/* We are expecting that the ddebug pipe has already
	 * waited for the context, so this buffer should be idle.
	 * If the GPU is hung, there is no point in waiting for it.
	 */
	uint32_t *map = ctx->ws->buffer_map(scs->trace_buf->buf,
					    NULL,
					    PIPE_TRANSFER_UNSYNCHRONIZED |
					    PIPE_TRANSFER_READ);
	if (map) {
		/* map[0]/map[1] hold the last gfx/compute trace IDs the GPU
		 * reached. */
		last_trace_id = map[0];
		last_compute_trace_id = map[1];
	}

	if (chunk->gfx_end != chunk->gfx_begin) {
		/* The first chunk of a CS also covers the init-config IB2s. */
		if (chunk->gfx_begin == 0) {
			if (ctx->init_config)
				ac_parse_ib(f, ctx->init_config->pm4, ctx->init_config->ndw,
					    NULL, 0, "IB2: Init config", ctx->chip_class,
					    NULL, NULL);

			if (ctx->init_config_gs_rings)
				ac_parse_ib(f, ctx->init_config_gs_rings->pm4,
					    ctx->init_config_gs_rings->ndw,
					    NULL, 0, "IB2: Init GS rings", ctx->chip_class,
					    NULL, NULL);
		}

		/* A flushed CS was linearized into scs->gfx; an unflushed one
		 * is still split across cmdbuf chunks. */
		if (scs->flushed) {
			ac_parse_ib(f, scs->gfx.ib + chunk->gfx_begin,
				    chunk->gfx_end - chunk->gfx_begin,
				    &last_trace_id, map ? 1 : 0, "IB", ctx->chip_class,
				    NULL, NULL);
		} else {
			si_parse_current_ib(f, ctx->gfx_cs, chunk->gfx_begin,
					    chunk->gfx_end, &last_trace_id, map ? 1 : 0,
					    "IB", ctx->chip_class);
		}
	}

	if (chunk->compute_end != chunk->compute_begin) {
		/* Compute ranges only exist with primitive-discard compute. */
		assert(ctx->prim_discard_compute_cs);

		if (scs->flushed) {
			ac_parse_ib(f, scs->compute.ib + chunk->compute_begin,
				    chunk->compute_end - chunk->compute_begin,
				    &last_compute_trace_id, map ? 1 : 0, "Compute IB", ctx->chip_class,
				    NULL, NULL);
		} else {
			si_parse_current_ib(f, ctx->prim_discard_compute_cs, chunk->compute_begin,
					    chunk->compute_end, &last_compute_trace_id,
					    map ? 1 : 0, "Compute IB", ctx->chip_class);
		}
	}

	if (chunk->dump_bo_list) {
		fprintf(f, "Flushing. Time: ");
		util_dump_ns(f, scs->time_flush);
		fprintf(f, "\n\n");
		si_dump_bo_list(ctx, &scs->gfx, f);
	}
}

static const struct u_log_chunk_type si_log_chunk_type_cs = {
	.destroy = si_log_chunk_type_cs_destroy,
	.print = si_log_chunk_type_cs_print,
};
483
484 static void si_log_cs(struct si_context *ctx, struct u_log_context *log,
485 bool dump_bo_list)
486 {
487 assert(ctx->current_saved_cs);
488
489 struct si_saved_cs *scs = ctx->current_saved_cs;
490 unsigned gfx_cur = ctx->gfx_cs->prev_dw + ctx->gfx_cs->current.cdw;
491 unsigned compute_cur = 0;
492
493 if (ctx->prim_discard_compute_cs)
494 compute_cur = ctx->prim_discard_compute_cs->prev_dw + ctx->prim_discard_compute_cs->current.cdw;
495
496 if (!dump_bo_list &&
497 gfx_cur == scs->gfx_last_dw &&
498 compute_cur == scs->compute_last_dw)
499 return;
500
501 struct si_log_chunk_cs *chunk = calloc(1, sizeof(*chunk));
502
503 chunk->ctx = ctx;
504 si_saved_cs_reference(&chunk->cs, scs);
505 chunk->dump_bo_list = dump_bo_list;
506
507 chunk->gfx_begin = scs->gfx_last_dw;
508 chunk->gfx_end = gfx_cur;
509 scs->gfx_last_dw = gfx_cur;
510
511 chunk->compute_begin = scs->compute_last_dw;
512 chunk->compute_end = compute_cur;
513 scs->compute_last_dw = compute_cur;
514
515 u_log_chunk(log, &si_log_chunk_type_cs, chunk);
516 }
517
/* u_log auto-flush callback: record the IBs built so far without the
 * buffer list. */
void si_auto_log_cs(void *data, struct u_log_context *log)
{
	struct si_context *ctx = (struct si_context *)data;
	si_log_cs(ctx, log, false);
}

/* Called at flush time: log the final IB ranges including the buffer
 * list. For the aux context, which ddebug does not wrap, also dump the
 * log straight to a debug file. */
void si_log_hw_flush(struct si_context *sctx)
{
	if (!sctx->log)
		return;

	si_log_cs(sctx, sctx->log, true);

	if (&sctx->b == sctx->screen->aux_context) {
		/* The aux context isn't captured by the ddebug wrapper,
		 * so we dump it on a flush-by-flush basis here.
		 */
		FILE *f = dd_get_debug_file(false);
		if (!f) {
			fprintf(stderr, "radeonsi: error opening aux context dump file.\n");
		} else {
			dd_write_header(f, &sctx->screen->b, 0);

			fprintf(f, "Aux context dump:\n\n");
			u_log_new_page_print(sctx->log, f);

			fclose(f);
		}
	}
}
548
549 static const char *priority_to_string(enum radeon_bo_priority priority)
550 {
551 #define ITEM(x) [RADEON_PRIO_##x] = #x
552 static const char *table[64] = {
553 ITEM(FENCE),
554 ITEM(TRACE),
555 ITEM(SO_FILLED_SIZE),
556 ITEM(QUERY),
557 ITEM(IB1),
558 ITEM(IB2),
559 ITEM(DRAW_INDIRECT),
560 ITEM(INDEX_BUFFER),
561 ITEM(CP_DMA),
562 ITEM(CONST_BUFFER),
563 ITEM(DESCRIPTORS),
564 ITEM(BORDER_COLORS),
565 ITEM(SAMPLER_BUFFER),
566 ITEM(VERTEX_BUFFER),
567 ITEM(SHADER_RW_BUFFER),
568 ITEM(COMPUTE_GLOBAL),
569 ITEM(SAMPLER_TEXTURE),
570 ITEM(SHADER_RW_IMAGE),
571 ITEM(SAMPLER_TEXTURE_MSAA),
572 ITEM(COLOR_BUFFER),
573 ITEM(DEPTH_BUFFER),
574 ITEM(COLOR_BUFFER_MSAA),
575 ITEM(DEPTH_BUFFER_MSAA),
576 ITEM(SEPARATE_META),
577 ITEM(SHADER_BINARY),
578 ITEM(SHADER_RINGS),
579 ITEM(SCRATCH_BUFFER),
580 };
581 #undef ITEM
582
583 assert(priority < ARRAY_SIZE(table));
584 return table[priority];
585 }
586
587 static int bo_list_compare_va(const struct radeon_bo_list_item *a,
588 const struct radeon_bo_list_item *b)
589 {
590 return a->vm_address < b->vm_address ? -1 :
591 a->vm_address > b->vm_address ? 1 : 0;
592 }
593
/* Print the CS buffer list sorted by VA, in page units, including the
 * holes between buffers and each buffer's priority-usage flags. */
static void si_dump_bo_list(struct si_context *sctx,
			    const struct radeon_saved_cs *saved, FILE *f)
{
	unsigned i,j;

	if (!saved->bo_list)
		return;

	/* Sort the list according to VM addresses first. */
	qsort(saved->bo_list, saved->bo_count,
	      sizeof(saved->bo_list[0]), (void*)bo_list_compare_va);

	fprintf(f, "Buffer list (in units of pages = 4kB):\n"
		COLOR_YELLOW "        Size    VM start page         "
		"VM end page           Usage" COLOR_RESET "\n");

	for (i = 0; i < saved->bo_count; i++) {
		/* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
		const unsigned page_size = sctx->screen->info.gart_page_size;
		uint64_t va = saved->bo_list[i].vm_address;
		uint64_t size = saved->bo_list[i].bo_size;
		bool hit = false;	/* true once a usage flag was printed */

		/* If there's unused virtual memory between 2 buffers, print it. */
		if (i) {
			uint64_t previous_va_end = saved->bo_list[i-1].vm_address +
						   saved->bo_list[i-1].bo_size;

			if (va > previous_va_end) {
				fprintf(f, "  %10"PRIu64"    -- hole --\n",
					(va - previous_va_end) / page_size);
			}
		}

		/* Print the buffer. */
		fprintf(f, "  %10"PRIu64"    0x%013"PRIX64"       0x%013"PRIX64"       ",
			size / page_size, va / page_size, (va + size) / page_size);

		/* Print the usage: one name per set priority bit. */
		for (j = 0; j < 32; j++) {
			if (!(saved->bo_list[i].priority_usage & (1u << j)))
				continue;

			fprintf(f, "%s%s", !hit ? "" : ", ", priority_to_string(j));
			hit = true;
		}
		fprintf(f, "\n");
	}
	fprintf(f, "\nNote: The holes represent memory not used by the IB.\n"
		"      Other buffers can still be allocated there.\n\n");
}
645
646 static void si_dump_framebuffer(struct si_context *sctx, struct u_log_context *log)
647 {
648 struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
649 struct si_texture *tex;
650 int i;
651
652 for (i = 0; i < state->nr_cbufs; i++) {
653 if (!state->cbufs[i])
654 continue;
655
656 tex = (struct si_texture*)state->cbufs[i]->texture;
657 u_log_printf(log, COLOR_YELLOW "Color buffer %i:" COLOR_RESET "\n", i);
658 si_print_texture_info(sctx->screen, tex, log);
659 u_log_printf(log, "\n");
660 }
661
662 if (state->zsbuf) {
663 tex = (struct si_texture*)state->zsbuf->texture;
664 u_log_printf(log, COLOR_YELLOW "Depth-stencil buffer:" COLOR_RESET "\n");
665 si_print_texture_info(sctx->screen, tex, log);
666 u_log_printf(log, "\n");
667 }
668 }
669
/* Maps a dump slot index to the slot index used in the uploaded list. */
typedef unsigned (*slot_remap_func)(unsigned);

struct si_log_chunk_desc_list {
	/** Pointer to memory map of buffer where the list is uploaded */
	uint32_t *gpu_list;
	/** Reference of buffer where the list is uploaded, so that gpu_list
	 * is kept live. */
	struct si_resource *buf;

	const char *shader_name;
	const char *elem_name;
	slot_remap_func slot_remap;
	enum chip_class chip_class;
	unsigned element_dw_size;	/* dwords per descriptor element */
	unsigned num_elements;

	/* CPU-side snapshot of the descriptors, appended at allocation time
	 * (num_elements * element_dw_size dwords). */
	uint32_t list[0];
};

/* Release the buffer reference that keeps gpu_list mapped, then free. */
static void
si_log_chunk_desc_list_destroy(void *data)
{
	struct si_log_chunk_desc_list *chunk = data;
	si_resource_reference(&chunk->buf, NULL);
	FREE(chunk);
}
696
/* Print every element of a descriptor list, preferring the GPU copy when
 * it is mapped, and flag slots whose GPU contents differ from the
 * CPU-side snapshot (descriptor corruption). */
static void
si_log_chunk_desc_list_print(void *data, FILE *f)
{
	struct si_log_chunk_desc_list *chunk = data;

	for (unsigned i = 0; i < chunk->num_elements; i++) {
		unsigned cpu_dw_offset = i * chunk->element_dw_size;
		unsigned gpu_dw_offset = chunk->slot_remap(i) * chunk->element_dw_size;
		const char *list_note = chunk->gpu_list ? "GPU list" : "CPU list";
		uint32_t *cpu_list = chunk->list + cpu_dw_offset;
		uint32_t *gpu_list = chunk->gpu_list ? chunk->gpu_list + gpu_dw_offset : cpu_list;

		fprintf(f, COLOR_GREEN "%s%s slot %u (%s):" COLOR_RESET "\n",
			chunk->shader_name, chunk->elem_name, i, list_note);

		/* Decode according to the element size:
		 * 4 dw = buffer descriptor, 8 dw = image descriptor (whose
		 * last 4 dwords double as a buffer descriptor), 16 dw =
		 * image + buffer view + FMASK + sampler state. */
		switch (chunk->element_dw_size) {
		case 4:
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);
			break;
		case 8:
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Buffer:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[4+j], 0xffffffff);
			break;
		case 16:
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Buffer:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[4+j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    FMASK:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[8+j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Sampler state:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F30_SQ_IMG_SAMP_WORD0 + j*4,
					    gpu_list[12+j], 0xffffffff);
			break;
		}

		/* Compare the GPU copy against the CPU snapshot. */
		if (memcmp(gpu_list, cpu_list, chunk->element_dw_size * 4) != 0) {
			fprintf(f, COLOR_RED "!!!!! This slot was corrupted in GPU memory !!!!!"
				COLOR_RESET "\n");
		}

		fprintf(f, "\n");
	}

}

static const struct u_log_chunk_type si_log_chunk_type_descriptor_list = {
	.destroy = si_log_chunk_desc_list_destroy,
	.print = si_log_chunk_desc_list_print,
};
771
772 static void si_dump_descriptor_list(struct si_screen *screen,
773 struct si_descriptors *desc,
774 const char *shader_name,
775 const char *elem_name,
776 unsigned element_dw_size,
777 unsigned num_elements,
778 slot_remap_func slot_remap,
779 struct u_log_context *log)
780 {
781 if (!desc->list)
782 return;
783
784 /* In some cases, the caller doesn't know how many elements are really
785 * uploaded. Reduce num_elements to fit in the range of active slots. */
786 unsigned active_range_dw_begin =
787 desc->first_active_slot * desc->element_dw_size;
788 unsigned active_range_dw_end =
789 active_range_dw_begin + desc->num_active_slots * desc->element_dw_size;
790
791 while (num_elements > 0) {
792 int i = slot_remap(num_elements - 1);
793 unsigned dw_begin = i * element_dw_size;
794 unsigned dw_end = dw_begin + element_dw_size;
795
796 if (dw_begin >= active_range_dw_begin && dw_end <= active_range_dw_end)
797 break;
798
799 num_elements--;
800 }
801
802 struct si_log_chunk_desc_list *chunk =
803 CALLOC_VARIANT_LENGTH_STRUCT(si_log_chunk_desc_list,
804 4 * element_dw_size * num_elements);
805 chunk->shader_name = shader_name;
806 chunk->elem_name = elem_name;
807 chunk->element_dw_size = element_dw_size;
808 chunk->num_elements = num_elements;
809 chunk->slot_remap = slot_remap;
810 chunk->chip_class = screen->info.chip_class;
811
812 si_resource_reference(&chunk->buf, desc->buffer);
813 chunk->gpu_list = desc->gpu_list;
814
815 for (unsigned i = 0; i < num_elements; ++i) {
816 memcpy(&chunk->list[i * element_dw_size],
817 &desc->list[slot_remap(i) * element_dw_size],
818 4 * element_dw_size);
819 }
820
821 u_log_chunk(log, &si_log_chunk_type_descriptor_list, chunk);
822 }
823
/* Trivial slot remap: dump slot i maps straight to list slot i. */
static unsigned si_identity(unsigned slot)
{
	unsigned remapped = slot;
	return remapped;
}
828
/* Dump all descriptor lists (vertex buffers, constant/shader buffers,
 * samplers, images) of one shader stage. \p info may be NULL (compute),
 * in which case the enabled masks come from the bind state instead of
 * the shader's declarations. */
static void si_dump_descriptors(struct si_context *sctx,
				enum pipe_shader_type processor,
				const struct tgsi_shader_info *info,
				struct u_log_context *log)
{
	struct si_descriptors *descs =
		&sctx->descriptors[SI_DESCS_FIRST_SHADER +
				   processor * SI_NUM_SHADER_DESCS];
	static const char *shader_name[] = {"VS", "PS", "GS", "TCS", "TES", "CS"};
	const char *name = shader_name[processor];
	unsigned enabled_constbuf, enabled_shaderbuf, enabled_samplers;
	unsigned enabled_images;

	if (info) {
		/* Prefer the shader's own declarations when available. */
		enabled_constbuf = info->const_buffers_declared;
		enabled_shaderbuf = info->shader_buffers_declared;
		enabled_samplers = info->samplers_declared;
		enabled_images = info->images_declared;
	} else {
		/* Derive the masks from the bind state. Shader buffers are
		 * stored in reverse order within the combined buffer mask. */
		enabled_constbuf = sctx->const_and_shader_buffers[processor].enabled_mask >>
				   SI_NUM_SHADER_BUFFERS;
		enabled_shaderbuf = sctx->const_and_shader_buffers[processor].enabled_mask &
				    u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);
		enabled_shaderbuf = util_bitreverse(enabled_shaderbuf) >>
				    (32 - SI_NUM_SHADER_BUFFERS);
		enabled_samplers = sctx->samplers[processor].enabled_mask;
		enabled_images = sctx->images[processor].enabled_mask;
	}

	if (processor == PIPE_SHADER_VERTEX &&
	    sctx->vb_descriptors_buffer &&
	    sctx->vb_descriptors_gpu_list &&
	    sctx->vertex_elements) {
		assert(info); /* only CS may not have an info struct */
		/* Vertex buffer descriptors live in their own upload buffer;
		 * wrap them in a temporary si_descriptors for the dump. */
		struct si_descriptors desc = {};

		desc.buffer = sctx->vb_descriptors_buffer;
		desc.list = sctx->vb_descriptors_gpu_list;
		desc.gpu_list = sctx->vb_descriptors_gpu_list;
		desc.element_dw_size = 4;
		desc.num_active_slots = sctx->vertex_elements->desc_list_byte_size / 16;

		si_dump_descriptor_list(sctx->screen, &desc, name,
					" - Vertex buffer", 4, info->num_inputs,
					si_identity, log);
	}

	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS],
				name, " - Constant buffer", 4,
				util_last_bit(enabled_constbuf),
				si_get_constbuf_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS],
				name, " - Shader buffer", 4,
				util_last_bit(enabled_shaderbuf),
				si_get_shaderbuf_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES],
				name, " - Sampler", 16,
				util_last_bit(enabled_samplers),
				si_get_sampler_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES],
				name, " - Image", 8,
				util_last_bit(enabled_images),
				si_get_image_slot, log);
}
897
/* Dump the descriptor lists of a bound gfx shader stage. */
static void si_dump_gfx_descriptors(struct si_context *sctx,
				    const struct si_shader_ctx_state *state,
				    struct u_log_context *log)
{
	if (!state->cso || !state->current)
		return;

	si_dump_descriptors(sctx, state->cso->type, &state->cso->info, log);
}

/* Dump the descriptor lists of the bound compute shader (no TGSI info,
 * so the masks are derived from the bind state). */
static void si_dump_compute_descriptors(struct si_context *sctx,
					struct u_log_context *log)
{
	if (!sctx->cs_shader_state.program)
		return;

	si_dump_descriptors(sctx, PIPE_SHADER_COMPUTE, NULL, log);
}
916
/* One disassembled instruction, produced by si_add_split_disasm. */
struct si_shader_inst {
	const char *text; /* start of disassembly for this instruction */
	unsigned textlen;
	unsigned size; /* instruction size = 4 or 8 */
	uint64_t addr; /* instruction address */
};

/**
 * Split a disassembly string into instructions and add them to the array
 * pointed to by \p instructions.
 *
 * Labels are considered to be part of the following instruction.
 *
 * \param addr  in/out: running instruction address, advanced past each
 *              parsed instruction
 * \param num   in/out: number of entries already filled in \p instructions
 */
static void si_add_split_disasm(const char *disasm,
				uint64_t *addr,
				unsigned *num,
				struct si_shader_inst *instructions)
{
	const char *semicolon;

	/* Each instruction line of the disassembly contains a ';' followed
	 * by the encoded dwords; text without one is ignored. */
	while ((semicolon = strchr(disasm, ';'))) {
		struct si_shader_inst *inst = &instructions[(*num)++];
		const char *end = util_strchrnul(semicolon, '\n');

		inst->text = disasm;
		inst->textlen = end - disasm;

		inst->addr = *addr;
		/* More than 16 chars after ";" means the instruction is 8 bytes long. */
		inst->size = end - semicolon > 16 ? 8 : 4;
		*addr += inst->size;

		if (!(*end))
			break;
		disasm = end + 1;
	}
}
954
955 /* If the shader is being executed, print its asm instructions, and annotate
956 * those that are being executed right now with information about waves that
957 * execute them. This is most useful during a GPU hang.
958 */
959 static void si_print_annotated_shader(struct si_shader *shader,
960 struct ac_wave_info *waves,
961 unsigned num_waves,
962 FILE *f)
963 {
964 if (!shader || !shader->binary.disasm_string)
965 return;
966
967 uint64_t start_addr = shader->bo->gpu_address;
968 uint64_t end_addr = start_addr + shader->bo->b.b.width0;
969 unsigned i;
970
971 /* See if any wave executes the shader. */
972 for (i = 0; i < num_waves; i++) {
973 if (start_addr <= waves[i].pc && waves[i].pc <= end_addr)
974 break;
975 }
976 if (i == num_waves)
977 return; /* the shader is not being executed */
978
979 /* Remember the first found wave. The waves are sorted according to PC. */
980 waves = &waves[i];
981 num_waves -= i;
982
983 /* Get the list of instructions.
984 * Buffer size / 4 is the upper bound of the instruction count.
985 */
986 unsigned num_inst = 0;
987 uint64_t inst_addr = start_addr;
988 struct si_shader_inst *instructions =
989 calloc(shader->bo->b.b.width0 / 4, sizeof(struct si_shader_inst));
990
991 if (shader->prolog) {
992 si_add_split_disasm(shader->prolog->binary.disasm_string,
993 &inst_addr, &num_inst, instructions);
994 }
995 if (shader->previous_stage) {
996 si_add_split_disasm(shader->previous_stage->binary.disasm_string,
997 &inst_addr, &num_inst, instructions);
998 }
999 if (shader->prolog2) {
1000 si_add_split_disasm(shader->prolog2->binary.disasm_string,
1001 &inst_addr, &num_inst, instructions);
1002 }
1003 si_add_split_disasm(shader->binary.disasm_string,
1004 &inst_addr, &num_inst, instructions);
1005 if (shader->epilog) {
1006 si_add_split_disasm(shader->epilog->binary.disasm_string,
1007 &inst_addr, &num_inst, instructions);
1008 }
1009
1010 fprintf(f, COLOR_YELLOW "%s - annotated disassembly:" COLOR_RESET "\n",
1011 si_get_shader_name(shader, shader->selector->type));
1012
1013 /* Print instructions with annotations. */
1014 for (i = 0; i < num_inst; i++) {
1015 struct si_shader_inst *inst = &instructions[i];
1016
1017 fprintf(f, "%.*s [PC=0x%"PRIx64", size=%u]\n",
1018 inst->textlen, inst->text, inst->addr, inst->size);
1019
1020 /* Print which waves execute the instruction right now. */
1021 while (num_waves && inst->addr == waves->pc) {
1022 fprintf(f,
1023 " " COLOR_GREEN "^ SE%u SH%u CU%u "
1024 "SIMD%u WAVE%u EXEC=%016"PRIx64 " ",
1025 waves->se, waves->sh, waves->cu, waves->simd,
1026 waves->wave, waves->exec);
1027
1028 if (inst->size == 4) {
1029 fprintf(f, "INST32=%08X" COLOR_RESET "\n",
1030 waves->inst_dw0);
1031 } else {
1032 fprintf(f, "INST64=%08X %08X" COLOR_RESET "\n",
1033 waves->inst_dw0, waves->inst_dw1);
1034 }
1035
1036 waves->matched = true;
1037 waves = &waves[1];
1038 num_waves--;
1039 }
1040 }
1041
1042 fprintf(f, "\n\n");
1043 free(instructions);
1044 }
1045
1046 static void si_dump_annotated_shaders(struct si_context *sctx, FILE *f)
1047 {
1048 struct ac_wave_info waves[AC_MAX_WAVES_PER_CHIP];
1049 unsigned num_waves = ac_get_wave_info(waves);
1050
1051 fprintf(f, COLOR_CYAN "The number of active waves = %u" COLOR_RESET
1052 "\n\n", num_waves);
1053
1054 si_print_annotated_shader(sctx->vs_shader.current, waves, num_waves, f);
1055 si_print_annotated_shader(sctx->tcs_shader.current, waves, num_waves, f);
1056 si_print_annotated_shader(sctx->tes_shader.current, waves, num_waves, f);
1057 si_print_annotated_shader(sctx->gs_shader.current, waves, num_waves, f);
1058 si_print_annotated_shader(sctx->ps_shader.current, waves, num_waves, f);
1059
1060 /* Print waves executing shaders that are not currently bound. */
1061 unsigned i;
1062 bool found = false;
1063 for (i = 0; i < num_waves; i++) {
1064 if (waves[i].matched)
1065 continue;
1066
1067 if (!found) {
1068 fprintf(f, COLOR_CYAN
1069 "Waves not executing currently-bound shaders:"
1070 COLOR_RESET "\n");
1071 found = true;
1072 }
1073 fprintf(f, " SE%u SH%u CU%u SIMD%u WAVE%u EXEC=%016"PRIx64
1074 " INST=%08X %08X PC=%"PRIx64"\n",
1075 waves[i].se, waves[i].sh, waves[i].cu, waves[i].simd,
1076 waves[i].wave, waves[i].exec, waves[i].inst_dw0,
1077 waves[i].inst_dw1, waves[i].pc);
1078 }
1079 if (found)
1080 fprintf(f, "\n\n");
1081 }
1082
1083 static void si_dump_command(const char *title, const char *command, FILE *f)
1084 {
1085 char line[2000];
1086
1087 FILE *p = popen(command, "r");
1088 if (!p)
1089 return;
1090
1091 fprintf(f, COLOR_YELLOW "%s: " COLOR_RESET "\n", title);
1092 while (fgets(line, sizeof(line), p))
1093 fputs(line, f);
1094 fprintf(f, "\n\n");
1095 pclose(p);
1096 }
1097
1098 static void si_dump_debug_state(struct pipe_context *ctx, FILE *f,
1099 unsigned flags)
1100 {
1101 struct si_context *sctx = (struct si_context*)ctx;
1102
1103 if (sctx->log)
1104 u_log_flush(sctx->log);
1105
1106 if (flags & PIPE_DUMP_DEVICE_STATUS_REGISTERS) {
1107 si_dump_debug_registers(sctx, f);
1108
1109 si_dump_annotated_shaders(sctx, f);
1110 si_dump_command("Active waves (raw data)", "umr -O halt_waves -wa | column -t", f);
1111 si_dump_command("Wave information", "umr -O halt_waves,bits -wa", f);
1112 }
1113 }
1114
1115 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log)
1116 {
1117 struct si_shader_ctx_state *tcs_shader;
1118
1119 if (!log)
1120 return;
1121
1122 tcs_shader = &sctx->tcs_shader;
1123 if (sctx->tes_shader.cso && !sctx->tcs_shader.cso)
1124 tcs_shader = &sctx->fixed_func_tcs_shader;
1125
1126 si_dump_framebuffer(sctx, log);
1127
1128 si_dump_gfx_shader(sctx, &sctx->vs_shader, log);
1129 si_dump_gfx_shader(sctx, tcs_shader, log);
1130 si_dump_gfx_shader(sctx, &sctx->tes_shader, log);
1131 si_dump_gfx_shader(sctx, &sctx->gs_shader, log);
1132 si_dump_gfx_shader(sctx, &sctx->ps_shader, log);
1133
1134 si_dump_descriptor_list(sctx->screen,
1135 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
1136 "", "RW buffers", 4,
1137 sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots,
1138 si_identity, log);
1139 si_dump_gfx_descriptors(sctx, &sctx->vs_shader, log);
1140 si_dump_gfx_descriptors(sctx, tcs_shader, log);
1141 si_dump_gfx_descriptors(sctx, &sctx->tes_shader, log);
1142 si_dump_gfx_descriptors(sctx, &sctx->gs_shader, log);
1143 si_dump_gfx_descriptors(sctx, &sctx->ps_shader, log);
1144 }
1145
/* Log the current compute state (bound shader + its descriptors) to
 * \p log. No-op when \p log is NULL.
 */
void si_log_compute_state(struct si_context *sctx, struct u_log_context *log)
{
	if (!log)
		return;

	si_dump_compute_shader(sctx, log);
	si_dump_compute_descriptors(sctx, log);
}
1154
1155 static void si_dump_dma(struct si_context *sctx,
1156 struct radeon_saved_cs *saved, FILE *f)
1157 {
1158 static const char ib_name[] = "sDMA IB";
1159 unsigned i;
1160
1161 si_dump_bo_list(sctx, saved, f);
1162
1163 fprintf(f, "------------------ %s begin ------------------\n", ib_name);
1164
1165 for (i = 0; i < saved->num_dw; ++i) {
1166 fprintf(f, " %08x\n", saved->ib[i]);
1167 }
1168
1169 fprintf(f, "------------------- %s end -------------------\n", ib_name);
1170 fprintf(f, "\n");
1171
1172 fprintf(f, "SDMA Dump Done.\n");
1173 }
1174
/**
 * Check dmesg for a new GPU VM fault and, if one occurred, write a full
 * debug report (command line, device info, faulting page, plus the saved
 * IB / draw / compute state) to a ddebug report file, then terminate the
 * process.
 *
 * \param sctx   context whose state is dumped
 * \param saved  linearized copy of the last submitted CS (used for the
 *               DMA-ring dump)
 * \param ring   which ring the CS was submitted to (gfx or DMA)
 */
void si_check_vm_faults(struct si_context *sctx,
			struct radeon_saved_cs *saved, enum ring_type ring)
{
	struct pipe_screen *screen = sctx->b.screen;
	FILE *f;
	uint64_t addr;
	char cmd_line[4096];

	/* Only messages newer than dmesg_timestamp count; on success,
	 * addr receives the faulting VM page. */
	if (!ac_vm_fault_occured(sctx->chip_class,
				 &sctx->dmesg_timestamp, &addr))
		return;

	f = dd_get_debug_file(false);
	if (!f)
		return;

	fprintf(f, "VM fault report.\n\n");
	if (os_get_command_line(cmd_line, sizeof(cmd_line)))
		fprintf(f, "Command: %s\n", cmd_line);
	fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
	fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
	fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
	fprintf(f, "Failing VM page: 0x%08"PRIx64"\n\n", addr);

	/* Helps correlate the fault with an apitrace capture, if any. */
	if (sctx->apitrace_call_number)
		fprintf(f, "Last apitrace call: %u\n\n",
			sctx->apitrace_call_number);

	switch (ring) {
	case RING_GFX: {
		/* Collect draw/compute state and the saved CS through the
		 * u_log machinery, then print everything into the report. */
		struct u_log_context log;
		u_log_context_init(&log);

		si_log_draw_state(sctx, &log);
		si_log_compute_state(sctx, &log);
		si_log_cs(sctx, &log, true);

		u_log_new_page_print(&log, f);
		u_log_context_destroy(&log);
		break;
	}
	case RING_DMA:
		si_dump_dma(sctx, saved, f);
		break;

	default:
		break;
	}

	fclose(f);

	/* The GPU is in an unusable state after a VM fault; exit rather
	 * than continue producing garbage. */
	fprintf(stderr, "Detected a VM fault, exiting...\n");
	exit(0);
}
1229
/**
 * Install the pipe_context debug entrypoints for this context and prime
 * the dmesg timestamp used by VM-fault checking.
 */
void si_init_debug_functions(struct si_context *sctx)
{
	sctx->b.dump_debug_state = si_dump_debug_state;

	/* Set the initial dmesg timestamp for this context, so that
	 * only new messages will be checked for VM faults.
	 */
	if (sctx->screen->debug_flags & DBG(CHECK_VM))
		ac_vm_fault_occured(sctx->chip_class,
				    &sctx->dmesg_timestamp, NULL);
}