radeonsi: move saved_cs functions from r600_pipe_common.c to si_debug.c
[mesa.git] / src / gallium / drivers / radeonsi / si_debug.c
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "si_pipe.h"
25 #include "si_compute.h"
26 #include "sid.h"
27 #include "gfx9d.h"
28 #include "sid_tables.h"
29 #include "ddebug/dd_util.h"
30 #include "util/u_dump.h"
31 #include "util/u_log.h"
32 #include "util/u_memory.h"
33 #include "ac_debug.h"
34
35 static void si_dump_bo_list(struct si_context *sctx,
36 const struct radeon_saved_cs *saved, FILE *f);
37
38 DEBUG_GET_ONCE_OPTION(replace_shaders, "RADEON_REPLACE_SHADERS", NULL)
39
40 /**
41 * Store a linearized copy of all chunks of \p cs together with the buffer
42 * list in \p saved.
43 */
44 void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
45 struct radeon_saved_cs *saved, bool get_buffer_list)
46 {
47 uint32_t *buf;
48 unsigned i;
49
50 /* Save the IB chunks. */
51 saved->num_dw = cs->prev_dw + cs->current.cdw;
52 saved->ib = MALLOC(4 * saved->num_dw);
53 if (!saved->ib)
54 goto oom;
55
56 buf = saved->ib;
57 for (i = 0; i < cs->num_prev; ++i) {
58 memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
59 buf += cs->prev[i].cdw;
60 }
61 memcpy(buf, cs->current.buf, cs->current.cdw * 4);
62
63 if (!get_buffer_list)
64 return;
65
66 /* Save the buffer list. */
67 saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
68 saved->bo_list = CALLOC(saved->bo_count,
69 sizeof(saved->bo_list[0]));
70 if (!saved->bo_list) {
71 FREE(saved->ib);
72 goto oom;
73 }
74 ws->cs_get_buffer_list(cs, saved->bo_list);
75
76 return;
77
78 oom:
79 fprintf(stderr, "%s: out of memory\n", __func__);
80 memset(saved, 0, sizeof(*saved));
81 }
82
83 void si_clear_saved_cs(struct radeon_saved_cs *saved)
84 {
85 FREE(saved->ib);
86 FREE(saved->bo_list);
87
88 memset(saved, 0, sizeof(*saved));
89 }
90
/* Destroy an si_saved_cs: free the saved gfx IB/BO-list copies, drop the
 * trace buffer reference, then free the struct itself. */
void si_destroy_saved_cs(struct si_saved_cs *scs)
{
	si_clear_saved_cs(&scs->gfx);
	r600_resource_reference(&scs->trace_buf, NULL);
	free(scs);
}
97
98 static void si_dump_shader(struct si_screen *sscreen,
99 enum pipe_shader_type processor,
100 const struct si_shader *shader, FILE *f)
101 {
102 if (shader->shader_log)
103 fwrite(shader->shader_log, shader->shader_log_size, 1, f);
104 else
105 si_shader_dump(sscreen, shader, NULL, processor, f, false);
106 }
107
/* Log chunk that keeps one shader (and everything needed to print and later
 * destroy it) alive until the log is flushed. */
struct si_log_chunk_shader {
	/* The shader destroy code assumes a current context for unlinking of
	 * PM4 packets etc.
	 *
	 * While we should be able to destroy shaders without a context, doing
	 * so would happen only very rarely and be therefore likely to fail
	 * just when you're trying to debug something. Let's just remember the
	 * current context in the chunk.
	 */
	struct si_context *ctx;
	struct si_shader *shader;
	enum pipe_shader_type processor;

	/* For keep-alive reference counts */
	struct si_shader_selector *sel;
	struct si_compute *program;
};
125
126 static void
127 si_log_chunk_shader_destroy(void *data)
128 {
129 struct si_log_chunk_shader *chunk = data;
130 si_shader_selector_reference(chunk->ctx, &chunk->sel, NULL);
131 si_compute_reference(&chunk->program, NULL);
132 FREE(chunk);
133 }
134
135 static void
136 si_log_chunk_shader_print(void *data, FILE *f)
137 {
138 struct si_log_chunk_shader *chunk = data;
139 struct si_screen *sscreen = chunk->ctx->screen;
140 si_dump_shader(sscreen, chunk->processor,
141 chunk->shader, f);
142 }
143
144 static struct u_log_chunk_type si_log_chunk_type_shader = {
145 .destroy = si_log_chunk_shader_destroy,
146 .print = si_log_chunk_shader_print,
147 };
148
149 static void si_dump_gfx_shader(struct si_context *ctx,
150 const struct si_shader_ctx_state *state,
151 struct u_log_context *log)
152 {
153 struct si_shader *current = state->current;
154
155 if (!state->cso || !current)
156 return;
157
158 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
159 chunk->ctx = ctx;
160 chunk->processor = state->cso->info.processor;
161 chunk->shader = current;
162 si_shader_selector_reference(ctx, &chunk->sel, current->selector);
163 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
164 }
165
166 static void si_dump_compute_shader(struct si_context *ctx,
167 struct u_log_context *log)
168 {
169 const struct si_cs_shader_state *state = &ctx->cs_shader_state;
170
171 if (!state->program)
172 return;
173
174 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
175 chunk->ctx = ctx;
176 chunk->processor = PIPE_SHADER_COMPUTE;
177 chunk->shader = &state->program->shader;
178 si_compute_reference(&chunk->program, state->program);
179 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
180 }
181
/**
 * Shader compiles can be overridden with arbitrary ELF objects by setting
 * the environment variable RADEON_REPLACE_SHADERS=num1:filename1[;num2:filename2]
 *
 * \param num     compile counter identifying the shader to (maybe) replace
 * \param binary  filled with the parsed ELF contents on success
 * \return true if the shader was replaced from a file
 */
bool si_replace_shader(unsigned num, struct ac_shader_binary *binary)
{
	const char *p = debug_get_option_replace_shaders();
	const char *semicolon;
	char *copy = NULL;
	FILE *f;
	long filesize, nread;
	char *buf = NULL;
	bool replaced = false;

	if (!p)
		return false;

	/* Scan the "num:file[;num:file...]" list for an entry matching num. */
	while (*p) {
		unsigned long i;
		char *endp;
		i = strtoul(p, &endp, 0);

		p = endp;
		if (*p != ':') {
			fprintf(stderr, "RADEON_REPLACE_SHADERS formatted badly.\n");
			exit(1);
		}
		++p;

		if (i == num)
			break;

		/* Not our entry: skip to the next ';'-separated one. */
		p = strchr(p, ';');
		if (!p)
			return false;
		++p;
	}
	if (!*p)
		return false;

	/* If more entries follow, isolate just this entry's filename. */
	semicolon = strchr(p, ';');
	if (semicolon) {
		p = copy = strndup(p, semicolon - p);
		if (!copy) {
			fprintf(stderr, "out of memory\n");
			return false;
		}
	}

	fprintf(stderr, "radeonsi: replace shader %u by %s\n", num, p);

	f = fopen(p, "r");
	if (!f) {
		perror("radeonsi: failed to open file");
		goto out_free;
	}

	/* Determine the file size via seek-to-end, then read it whole. */
	if (fseek(f, 0, SEEK_END) != 0)
		goto file_error;

	filesize = ftell(f);
	if (filesize < 0)
		goto file_error;

	if (fseek(f, 0, SEEK_SET) != 0)
		goto file_error;

	buf = MALLOC(filesize);
	if (!buf) {
		fprintf(stderr, "out of memory\n");
		goto out_close;
	}

	nread = fread(buf, 1, filesize, f);
	if (nread != filesize)
		goto file_error;

	/* Parse the ELF object into the output binary. */
	ac_elf_read(buf, filesize, binary);
	replaced = true;

out_close:
	fclose(f);
out_free:
	FREE(buf);
	free(copy);
	return replaced;

file_error:
	perror("radeonsi: reading shader");
	goto out_close;
}
273
274 /* Parsed IBs are difficult to read without colors. Use "less -R file" to
275 * read them, or use "aha -b -f file" to convert them to html.
276 */
277 #define COLOR_RESET "\033[0m"
278 #define COLOR_RED "\033[31m"
279 #define COLOR_GREEN "\033[1;32m"
280 #define COLOR_YELLOW "\033[1;33m"
281 #define COLOR_CYAN "\033[1;36m"
282
283 static void si_dump_mmapped_reg(struct si_context *sctx, FILE *f,
284 unsigned offset)
285 {
286 struct radeon_winsys *ws = sctx->b.ws;
287 uint32_t value;
288
289 if (ws->read_registers(ws, offset, 1, &value))
290 ac_dump_reg(f, sctx->b.chip_class, offset, value, ~0);
291 }
292
/* Dump a snapshot of GPU status registers (GRBM/SRBM/CP/SDMA) read through
 * the kernel. Mostly useful while diagnosing a hang. */
static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
{
	/* radeon (DRM major 2) only supports register reads from 2.42 on. */
	if (sctx->screen->info.drm_major == 2 &&
	    sctx->screen->info.drm_minor < 42)
		return; /* no radeon support */

	fprintf(f, "Memory-mapped registers:\n");
	si_dump_mmapped_reg(sctx, f, R_008010_GRBM_STATUS);

	/* No other registers can be read on DRM < 3.1.0. */
	if (sctx->screen->info.drm_major < 3 ||
	    sctx->screen->info.drm_minor < 1) {
		fprintf(f, "\n");
		return;
	}

	si_dump_mmapped_reg(sctx, f, R_008008_GRBM_STATUS2);
	si_dump_mmapped_reg(sctx, f, R_008014_GRBM_STATUS_SE0);
	si_dump_mmapped_reg(sctx, f, R_008018_GRBM_STATUS_SE1);
	si_dump_mmapped_reg(sctx, f, R_008038_GRBM_STATUS_SE2);
	si_dump_mmapped_reg(sctx, f, R_00803C_GRBM_STATUS_SE3);
	si_dump_mmapped_reg(sctx, f, R_00D034_SDMA0_STATUS_REG);
	si_dump_mmapped_reg(sctx, f, R_00D834_SDMA1_STATUS_REG);
	/* SRBM registers only exist up to VI. */
	if (sctx->b.chip_class <= VI) {
		si_dump_mmapped_reg(sctx, f, R_000E50_SRBM_STATUS);
		si_dump_mmapped_reg(sctx, f, R_000E4C_SRBM_STATUS2);
		si_dump_mmapped_reg(sctx, f, R_000E54_SRBM_STATUS3);
	}
	si_dump_mmapped_reg(sctx, f, R_008680_CP_STAT);
	si_dump_mmapped_reg(sctx, f, R_008674_CP_STALLED_STAT1);
	si_dump_mmapped_reg(sctx, f, R_008678_CP_STALLED_STAT2);
	si_dump_mmapped_reg(sctx, f, R_008670_CP_STALLED_STAT3);
	si_dump_mmapped_reg(sctx, f, R_008210_CP_CPC_STATUS);
	si_dump_mmapped_reg(sctx, f, R_008214_CP_CPC_BUSY_STAT);
	si_dump_mmapped_reg(sctx, f, R_008218_CP_CPC_STALLED_STAT1);
	si_dump_mmapped_reg(sctx, f, R_00821C_CP_CPF_STATUS);
	si_dump_mmapped_reg(sctx, f, R_008220_CP_CPF_BUSY_STAT);
	si_dump_mmapped_reg(sctx, f, R_008224_CP_CPF_STALLED_STAT1);
	fprintf(f, "\n");
}
333
/* Log chunk describing a dword range of one saved gfx command stream. */
struct si_log_chunk_cs {
	struct si_context *ctx;
	struct si_saved_cs *cs;		/* referenced; released in destroy */
	bool dump_bo_list;		/* also print the buffer list when printing */
	unsigned gfx_begin, gfx_end;	/* dword range of the gfx IB to parse */
};
340
341 static void si_log_chunk_type_cs_destroy(void *data)
342 {
343 struct si_log_chunk_cs *chunk = data;
344 si_saved_cs_reference(&chunk->cs, NULL);
345 free(chunk);
346 }
347
/* Parse and print the dword range [begin, end) of a not-yet-flushed IB,
 * walking all previous chunks of \p cs before the current one.
 *
 * \param last_trace_id  in/out: last trace id read from the trace buffer
 * \param trace_id_count number of entries behind last_trace_id
 */
static void si_parse_current_ib(FILE *f, struct radeon_winsys_cs *cs,
				unsigned begin, unsigned end,
				int *last_trace_id, unsigned trace_id_count,
				const char *name, enum chip_class chip_class)
{
	unsigned orig_end = end;

	assert(begin <= end);

	fprintf(f, "------------------ %s begin (dw = %u) ------------------\n",
		name, begin);

	for (unsigned prev_idx = 0; prev_idx < cs->num_prev; ++prev_idx) {
		struct radeon_winsys_cs_chunk *chunk = &cs->prev[prev_idx];

		/* Print the part of [begin, end) overlapping this chunk. */
		if (begin < chunk->cdw) {
			ac_parse_ib_chunk(f, chunk->buf + begin,
					  MIN2(end, chunk->cdw) - begin,
					  last_trace_id, trace_id_count,
					  chip_class, NULL, NULL);
		}

		/* Done once the requested range ends within this chunk. */
		if (end <= chunk->cdw)
			return;

		if (begin < chunk->cdw)
			fprintf(f, "\n---------- Next %s Chunk ----------\n\n",
				name);

		/* Rebase begin/end to be relative to the next chunk. */
		begin -= MIN2(begin, chunk->cdw);
		end -= chunk->cdw;
	}

	assert(end <= cs->current.cdw);

	ac_parse_ib_chunk(f, cs->current.buf + begin, end - begin, last_trace_id,
			  trace_id_count, chip_class, NULL, NULL);

	fprintf(f, "------------------- %s end (dw = %u) -------------------\n\n",
		name, orig_end);
}
389
/* u_log print callback: parse and print the recorded gfx IB range, the
 * init-config IB2s when the range starts at 0, and optionally the BO list. */
static void si_log_chunk_type_cs_print(void *data, FILE *f)
{
	struct si_log_chunk_cs *chunk = data;
	struct si_context *ctx = chunk->ctx;
	struct si_saved_cs *scs = chunk->cs;
	int last_trace_id = -1;

	/* We are expecting that the ddebug pipe has already
	 * waited for the context, so this buffer should be idle.
	 * If the GPU is hung, there is no point in waiting for it.
	 */
	uint32_t *map = ctx->b.ws->buffer_map(scs->trace_buf->buf,
					      NULL,
					      PIPE_TRANSFER_UNSYNCHRONIZED |
					      PIPE_TRANSFER_READ);
	if (map)
		last_trace_id = map[0];

	if (chunk->gfx_end != chunk->gfx_begin) {
		/* The start of the CS also implies the preamble IB2s. */
		if (chunk->gfx_begin == 0) {
			if (ctx->init_config)
				ac_parse_ib(f, ctx->init_config->pm4, ctx->init_config->ndw,
					    NULL, 0, "IB2: Init config", ctx->b.chip_class,
					    NULL, NULL);

			if (ctx->init_config_gs_rings)
				ac_parse_ib(f, ctx->init_config_gs_rings->pm4,
					    ctx->init_config_gs_rings->ndw,
					    NULL, 0, "IB2: Init GS rings", ctx->b.chip_class,
					    NULL, NULL);
		}

		/* Flushed CSes have a linearized copy; unflushed ones must be
		 * walked chunk by chunk in the live winsys CS. */
		if (scs->flushed) {
			ac_parse_ib(f, scs->gfx.ib + chunk->gfx_begin,
				    chunk->gfx_end - chunk->gfx_begin,
				    &last_trace_id, map ? 1 : 0, "IB", ctx->b.chip_class,
				    NULL, NULL);
		} else {
			si_parse_current_ib(f, ctx->b.gfx.cs, chunk->gfx_begin,
					    chunk->gfx_end, &last_trace_id, map ? 1 : 0,
					    "IB", ctx->b.chip_class);
		}
	}

	if (chunk->dump_bo_list) {
		fprintf(f, "Flushing. Time: ");
		util_dump_ns(f, scs->time_flush);
		fprintf(f, "\n\n");
		si_dump_bo_list(ctx, &scs->gfx, f);
	}
}
441
/* vtable for command-stream log chunks. */
static const struct u_log_chunk_type si_log_chunk_type_cs = {
	.destroy = si_log_chunk_type_cs_destroy,
	.print = si_log_chunk_type_cs_print,
};
446
447 static void si_log_cs(struct si_context *ctx, struct u_log_context *log,
448 bool dump_bo_list)
449 {
450 assert(ctx->current_saved_cs);
451
452 struct si_saved_cs *scs = ctx->current_saved_cs;
453 unsigned gfx_cur = ctx->b.gfx.cs->prev_dw + ctx->b.gfx.cs->current.cdw;
454
455 if (!dump_bo_list &&
456 gfx_cur == scs->gfx_last_dw)
457 return;
458
459 struct si_log_chunk_cs *chunk = calloc(1, sizeof(*chunk));
460
461 chunk->ctx = ctx;
462 si_saved_cs_reference(&chunk->cs, scs);
463 chunk->dump_bo_list = dump_bo_list;
464
465 chunk->gfx_begin = scs->gfx_last_dw;
466 chunk->gfx_end = gfx_cur;
467 scs->gfx_last_dw = gfx_cur;
468
469 u_log_chunk(log, &si_log_chunk_type_cs, chunk);
470 }
471
472 void si_auto_log_cs(void *data, struct u_log_context *log)
473 {
474 struct si_context *ctx = (struct si_context *)data;
475 si_log_cs(ctx, log, false);
476 }
477
478 void si_log_hw_flush(struct si_context *sctx)
479 {
480 if (!sctx->b.log)
481 return;
482
483 si_log_cs(sctx, sctx->b.log, true);
484 }
485
486 static const char *priority_to_string(enum radeon_bo_priority priority)
487 {
488 #define ITEM(x) [RADEON_PRIO_##x] = #x
489 static const char *table[64] = {
490 ITEM(FENCE),
491 ITEM(TRACE),
492 ITEM(SO_FILLED_SIZE),
493 ITEM(QUERY),
494 ITEM(IB1),
495 ITEM(IB2),
496 ITEM(DRAW_INDIRECT),
497 ITEM(INDEX_BUFFER),
498 ITEM(VCE),
499 ITEM(UVD),
500 ITEM(SDMA_BUFFER),
501 ITEM(SDMA_TEXTURE),
502 ITEM(CP_DMA),
503 ITEM(CONST_BUFFER),
504 ITEM(DESCRIPTORS),
505 ITEM(BORDER_COLORS),
506 ITEM(SAMPLER_BUFFER),
507 ITEM(VERTEX_BUFFER),
508 ITEM(SHADER_RW_BUFFER),
509 ITEM(COMPUTE_GLOBAL),
510 ITEM(SAMPLER_TEXTURE),
511 ITEM(SHADER_RW_IMAGE),
512 ITEM(SAMPLER_TEXTURE_MSAA),
513 ITEM(COLOR_BUFFER),
514 ITEM(DEPTH_BUFFER),
515 ITEM(COLOR_BUFFER_MSAA),
516 ITEM(DEPTH_BUFFER_MSAA),
517 ITEM(CMASK),
518 ITEM(DCC),
519 ITEM(HTILE),
520 ITEM(SHADER_BINARY),
521 ITEM(SHADER_RINGS),
522 ITEM(SCRATCH_BUFFER),
523 };
524 #undef ITEM
525
526 assert(priority < ARRAY_SIZE(table));
527 return table[priority];
528 }
529
530 static int bo_list_compare_va(const struct radeon_bo_list_item *a,
531 const struct radeon_bo_list_item *b)
532 {
533 return a->vm_address < b->vm_address ? -1 :
534 a->vm_address > b->vm_address ? 1 : 0;
535 }
536
/* Print the saved buffer list sorted by VM address, in units of GART pages,
 * including holes between buffers and each buffer's priority usage flags. */
static void si_dump_bo_list(struct si_context *sctx,
			    const struct radeon_saved_cs *saved, FILE *f)
{
	unsigned i,j;

	if (!saved->bo_list)
		return;

	/* Sort the list according to VM adddresses first. */
	qsort(saved->bo_list, saved->bo_count,
	      sizeof(saved->bo_list[0]), (void*)bo_list_compare_va);

	fprintf(f, "Buffer list (in units of pages = 4kB):\n"
		COLOR_YELLOW "        Size    VM start page         "
		"VM end page           Usage" COLOR_RESET "\n");

	for (i = 0; i < saved->bo_count; i++) {
		/* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
		const unsigned page_size = sctx->b.screen->info.gart_page_size;
		uint64_t va = saved->bo_list[i].vm_address;
		uint64_t size = saved->bo_list[i].bo_size;
		bool hit = false;

		/* If there's unused virtual memory between 2 buffers, print it. */
		if (i) {
			uint64_t previous_va_end = saved->bo_list[i-1].vm_address +
						   saved->bo_list[i-1].bo_size;

			if (va > previous_va_end) {
				fprintf(f, "  %10"PRIu64"    -- hole --\n",
					(va - previous_va_end) / page_size);
			}
		}

		/* Print the buffer. */
		fprintf(f, "  %10"PRIu64"    0x%013"PRIX64"       0x%013"PRIX64"       ",
			size / page_size, va / page_size, (va + size) / page_size);

		/* Print the usage: one name per priority bit set. */
		for (j = 0; j < 64; j++) {
			if (!(saved->bo_list[i].priority_usage & (1ull << j)))
				continue;

			fprintf(f, "%s%s", !hit ? "" : ", ", priority_to_string(j));
			hit = true;
		}
		fprintf(f, "\n");
	}
	fprintf(f, "\nNote: The holes represent memory not used by the IB.\n"
		   "      Other buffers can still be allocated there.\n\n");
}
588
589 static void si_dump_framebuffer(struct si_context *sctx, struct u_log_context *log)
590 {
591 struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
592 struct r600_texture *rtex;
593 int i;
594
595 for (i = 0; i < state->nr_cbufs; i++) {
596 if (!state->cbufs[i])
597 continue;
598
599 rtex = (struct r600_texture*)state->cbufs[i]->texture;
600 u_log_printf(log, COLOR_YELLOW "Color buffer %i:" COLOR_RESET "\n", i);
601 si_print_texture_info(sctx->b.screen, rtex, log);
602 u_log_printf(log, "\n");
603 }
604
605 if (state->zsbuf) {
606 rtex = (struct r600_texture*)state->zsbuf->texture;
607 u_log_printf(log, COLOR_YELLOW "Depth-stencil buffer:" COLOR_RESET "\n");
608 si_print_texture_info(sctx->b.screen, rtex, log);
609 u_log_printf(log, "\n");
610 }
611 }
612
/* Maps a printed slot index to the slot index used inside the list. */
typedef unsigned (*slot_remap_func)(unsigned);

/* Log chunk holding a CPU snapshot of a descriptor list plus a mapping of
 * the live GPU copy, so the two can be compared when printing. */
struct si_log_chunk_desc_list {
	/** Pointer to memory map of buffer where the list is uploaded */
	uint32_t *gpu_list;
	/** Reference of buffer where the list is uploaded, so that gpu_list
	 * is kept live. */
	struct r600_resource *buf;

	const char *shader_name;
	const char *elem_name;
	slot_remap_func slot_remap;
	enum chip_class chip_class;
	unsigned element_dw_size;	/* dwords per element (4, 8, or 16) */
	unsigned num_elements;

	/* CPU-side snapshot, num_elements * element_dw_size dwords. */
	uint32_t list[0];
};
631
632 static void
633 si_log_chunk_desc_list_destroy(void *data)
634 {
635 struct si_log_chunk_desc_list *chunk = data;
636 r600_resource_reference(&chunk->buf, NULL);
637 FREE(chunk);
638 }
639
/* u_log print callback: decode each descriptor element, preferring the
 * mapped GPU copy when available, and flag slots where the GPU copy
 * differs from the CPU snapshot (memory corruption). */
static void
si_log_chunk_desc_list_print(void *data, FILE *f)
{
	struct si_log_chunk_desc_list *chunk = data;

	for (unsigned i = 0; i < chunk->num_elements; i++) {
		unsigned cpu_dw_offset = i * chunk->element_dw_size;
		unsigned gpu_dw_offset = chunk->slot_remap(i) * chunk->element_dw_size;
		const char *list_note = chunk->gpu_list ? "GPU list" : "CPU list";
		uint32_t *cpu_list = chunk->list + cpu_dw_offset;
		uint32_t *gpu_list = chunk->gpu_list ? chunk->gpu_list + gpu_dw_offset : cpu_list;

		fprintf(f, COLOR_GREEN "%s%s slot %u (%s):" COLOR_RESET "\n",
			chunk->shader_name, chunk->elem_name, i, list_note);

		/* The element size determines the descriptor layout:
		 * 4 dw = buffer, 8 dw = image + buffer view,
		 * 16 dw = image + buffer view + FMASK + sampler state. */
		switch (chunk->element_dw_size) {
		case 4:
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);
			break;
		case 8:
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Buffer:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[4+j], 0xffffffff);
			break;
		case 16:
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Buffer:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[4+j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    FMASK:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[8+j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Sampler state:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F30_SQ_IMG_SAMP_WORD0 + j*4,
					    gpu_list[12+j], 0xffffffff);
			break;
		}

		if (memcmp(gpu_list, cpu_list, chunk->element_dw_size * 4) != 0) {
			fprintf(f, COLOR_RED "!!!!! This slot was corrupted in GPU memory !!!!!"
				COLOR_RESET "\n");
		}

		fprintf(f, "\n");
	}

}
709
/* vtable for descriptor-list log chunks. */
static const struct u_log_chunk_type si_log_chunk_type_descriptor_list = {
	.destroy = si_log_chunk_desc_list_destroy,
	.print = si_log_chunk_desc_list_print,
};
714
715 static void si_dump_descriptor_list(struct si_screen *screen,
716 struct si_descriptors *desc,
717 const char *shader_name,
718 const char *elem_name,
719 unsigned element_dw_size,
720 unsigned num_elements,
721 slot_remap_func slot_remap,
722 struct u_log_context *log)
723 {
724 if (!desc->list)
725 return;
726
727 /* In some cases, the caller doesn't know how many elements are really
728 * uploaded. Reduce num_elements to fit in the range of active slots. */
729 unsigned active_range_dw_begin =
730 desc->first_active_slot * desc->element_dw_size;
731 unsigned active_range_dw_end =
732 active_range_dw_begin + desc->num_active_slots * desc->element_dw_size;
733
734 while (num_elements > 0) {
735 int i = slot_remap(num_elements - 1);
736 unsigned dw_begin = i * element_dw_size;
737 unsigned dw_end = dw_begin + element_dw_size;
738
739 if (dw_begin >= active_range_dw_begin && dw_end <= active_range_dw_end)
740 break;
741
742 num_elements--;
743 }
744
745 struct si_log_chunk_desc_list *chunk =
746 CALLOC_VARIANT_LENGTH_STRUCT(si_log_chunk_desc_list,
747 4 * element_dw_size * num_elements);
748 chunk->shader_name = shader_name;
749 chunk->elem_name = elem_name;
750 chunk->element_dw_size = element_dw_size;
751 chunk->num_elements = num_elements;
752 chunk->slot_remap = slot_remap;
753 chunk->chip_class = screen->info.chip_class;
754
755 r600_resource_reference(&chunk->buf, desc->buffer);
756 chunk->gpu_list = desc->gpu_list;
757
758 for (unsigned i = 0; i < num_elements; ++i) {
759 memcpy(&chunk->list[i * element_dw_size],
760 &desc->list[slot_remap(i) * element_dw_size],
761 4 * element_dw_size);
762 }
763
764 u_log_chunk(log, &si_log_chunk_type_descriptor_list, chunk);
765 }
766
/* Identity slot mapping, for descriptor lists that are not remapped. */
static unsigned si_identity(unsigned slot_index)
{
	return slot_index;
}
771
/* Log all descriptor lists (vertex/const/shader buffers, samplers, images)
 * of one shader stage.
 *
 * \param info  the shader's TGSI info; NULL only for compute, in which case
 *              enabled-slot masks come from the bound-resource state instead
 */
static void si_dump_descriptors(struct si_context *sctx,
				enum pipe_shader_type processor,
				const struct tgsi_shader_info *info,
				struct u_log_context *log)
{
	struct si_descriptors *descs =
		&sctx->descriptors[SI_DESCS_FIRST_SHADER +
				   processor * SI_NUM_SHADER_DESCS];
	static const char *shader_name[] = {"VS", "PS", "GS", "TCS", "TES", "CS"};
	const char *name = shader_name[processor];
	unsigned enabled_constbuf, enabled_shaderbuf, enabled_samplers;
	unsigned enabled_images;

	if (info) {
		/* Use the masks declared by the shader itself. */
		enabled_constbuf = info->const_buffers_declared;
		enabled_shaderbuf = info->shader_buffers_declared;
		enabled_samplers = info->samplers_declared;
		enabled_images = info->images_declared;
	} else {
		/* No shader info: derive the masks from the bound resources.
		 * Const buffers live in the upper bits of the combined mask;
		 * shader buffers are stored in reverse order in the low bits,
		 * hence the bit-reversal. */
		enabled_constbuf = sctx->const_and_shader_buffers[processor].enabled_mask >>
				   SI_NUM_SHADER_BUFFERS;
		enabled_shaderbuf = sctx->const_and_shader_buffers[processor].enabled_mask &
				    u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);
		enabled_shaderbuf = util_bitreverse(enabled_shaderbuf) >>
				    (32 - SI_NUM_SHADER_BUFFERS);
		enabled_samplers = sctx->samplers[processor].enabled_mask;
		enabled_images = sctx->images[processor].enabled_mask;
	}

	if (processor == PIPE_SHADER_VERTEX &&
	    sctx->vb_descriptors_buffer &&
	    sctx->vb_descriptors_gpu_list &&
	    sctx->vertex_elements) {
		assert(info); /* only CS may not have an info struct */
		/* Build a temporary descriptor struct for the VB descriptors,
		 * which are managed separately from the shader descriptors. */
		struct si_descriptors desc = {};

		desc.buffer = sctx->vb_descriptors_buffer;
		desc.list = sctx->vb_descriptors_gpu_list;
		desc.gpu_list = sctx->vb_descriptors_gpu_list;
		desc.element_dw_size = 4;
		desc.num_active_slots = sctx->vertex_elements->desc_list_byte_size / 16;

		si_dump_descriptor_list(sctx->screen, &desc, name,
					" - Vertex buffer", 4, info->num_inputs,
					si_identity, log);
	}

	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS],
				name, " - Constant buffer", 4,
				util_last_bit(enabled_constbuf),
				si_get_constbuf_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS],
				name, " - Shader buffer", 4,
				util_last_bit(enabled_shaderbuf),
				si_get_shaderbuf_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES],
				name, " - Sampler", 16,
				util_last_bit(enabled_samplers),
				si_get_sampler_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES],
				name, " - Image", 8,
				util_last_bit(enabled_images),
				si_get_image_slot, log);
}
840
/* Log the descriptor lists of one bound gfx shader stage.
 * No-op when the stage is unbound or has no compiled variant. */
static void si_dump_gfx_descriptors(struct si_context *sctx,
				    const struct si_shader_ctx_state *state,
				    struct u_log_context *log)
{
	if (!state->cso || !state->current)
		return;

	si_dump_descriptors(sctx, state->cso->type, &state->cso->info, log);
}
850
/* Log the descriptor lists of the bound compute shader, if any.
 * Compute has no TGSI info here, so NULL is passed (see si_dump_descriptors). */
static void si_dump_compute_descriptors(struct si_context *sctx,
					struct u_log_context *log)
{
	if (!sctx->cs_shader_state.program)
		return;

	si_dump_descriptors(sctx, PIPE_SHADER_COMPUTE, NULL, log);
}
859
/* One disassembled shader instruction, used for annotating hang dumps. */
struct si_shader_inst {
	char text[160]; /* one disasm line */
	unsigned offset; /* instruction offset */
	unsigned size;   /* instruction size = 4 or 8 */
};
865
/* Split a disassembly string into lines and add them to the array pointed
 * to by "instructions".
 *
 * \param disasm       newline-separated disassembly text
 * \param start_addr   GPU address of the first instruction, used for the
 *                     PC annotation appended to each line
 * \param num          in/out: number of entries already in \p instructions;
 *                     incremented for each parsed line
 */
static void si_add_split_disasm(const char *disasm,
				uint64_t start_addr,
				unsigned *num,
				struct si_shader_inst *instructions)
{
	/* Offsets continue from the previously parsed part (prolog etc.). */
	struct si_shader_inst *last_inst = *num ? &instructions[*num - 1] : NULL;
	char *next;

	while ((next = strchr(disasm, '\n'))) {
		struct si_shader_inst *inst = &instructions[*num];
		unsigned len = next - disasm;

		assert(len < ARRAY_SIZE(inst->text));
		memcpy(inst->text, disasm, len);
		inst->text[len] = 0;
		inst->offset = last_inst ? last_inst->offset + last_inst->size : 0;

		const char *semicolon = strchr(disasm, ';');
		assert(semicolon);
		/* More than 16 chars after ";" means the instruction is 8 bytes long. */
		inst->size = next - semicolon > 16 ? 8 : 4;

		/* Append "[PC=..., off=..., size=...]" to the disasm line. */
		snprintf(inst->text + len, ARRAY_SIZE(inst->text) - len,
			 " [PC=0x%"PRIx64", off=%u, size=%u]",
			 start_addr + inst->offset, inst->offset, inst->size);

		last_inst = inst;
		(*num)++;
		disasm = next + 1;
	}
}
899
/* If the shader is being executed, print its asm instructions, and annotate
 * those that are being executed right now with information about waves that
 * execute them. This is most useful during a GPU hang.
 *
 * \param waves      list of live waves, sorted by PC
 * \param num_waves  number of entries in \p waves
 */
static void si_print_annotated_shader(struct si_shader *shader,
				      struct ac_wave_info *waves,
				      unsigned num_waves,
				      FILE *f)
{
	if (!shader || !shader->binary.disasm_string)
		return;

	uint64_t start_addr = shader->bo->gpu_address;
	uint64_t end_addr = start_addr + shader->bo->b.b.width0;
	unsigned i;

	/* See if any wave executes the shader. */
	for (i = 0; i < num_waves; i++) {
		if (start_addr <= waves[i].pc && waves[i].pc <= end_addr)
			break;
	}
	if (i == num_waves)
		return; /* the shader is not being executed */

	/* Remember the first found wave. The waves are sorted according to PC. */
	waves = &waves[i];
	num_waves -= i;

	/* Get the list of instructions.
	 * Buffer size / 4 is the upper bound of the instruction count.
	 */
	unsigned num_inst = 0;
	struct si_shader_inst *instructions =
		calloc(shader->bo->b.b.width0 / 4, sizeof(struct si_shader_inst));

	/* Concatenate the disassembly of all shader parts in execution order:
	 * prolog(s), previous stage (merged shaders), main, epilog. */
	if (shader->prolog) {
		si_add_split_disasm(shader->prolog->binary.disasm_string,
				    start_addr, &num_inst, instructions);
	}
	if (shader->previous_stage) {
		si_add_split_disasm(shader->previous_stage->binary.disasm_string,
				    start_addr, &num_inst, instructions);
	}
	if (shader->prolog2) {
		si_add_split_disasm(shader->prolog2->binary.disasm_string,
				    start_addr, &num_inst, instructions);
	}
	si_add_split_disasm(shader->binary.disasm_string,
			    start_addr, &num_inst, instructions);
	if (shader->epilog) {
		si_add_split_disasm(shader->epilog->binary.disasm_string,
				    start_addr, &num_inst, instructions);
	}

	fprintf(f, COLOR_YELLOW "%s - annotated disassembly:" COLOR_RESET "\n",
		si_get_shader_name(shader, shader->selector->type));

	/* Print instructions with annotations. */
	for (i = 0; i < num_inst; i++) {
		struct si_shader_inst *inst = &instructions[i];

		fprintf(f, "%s\n", inst->text);

		/* Print which waves execute the instruction right now. */
		while (num_waves && start_addr + inst->offset == waves->pc) {
			fprintf(f,
				"          " COLOR_GREEN "^ SE%u SH%u CU%u "
				"SIMD%u WAVE%u  EXEC=%016"PRIx64 "  ",
				waves->se, waves->sh, waves->cu, waves->simd,
				waves->wave, waves->exec);

			if (inst->size == 4) {
				fprintf(f, "INST32=%08X" COLOR_RESET "\n",
					waves->inst_dw0);
			} else {
				fprintf(f, "INST64=%08X %08X" COLOR_RESET "\n",
					waves->inst_dw0, waves->inst_dw1);
			}

			/* Mark the wave as accounted for, so it is not listed
			 * again under "waves not executing bound shaders". */
			waves->matched = true;
			waves = &waves[1];
			num_waves--;
		}
	}

	fprintf(f, "\n\n");
	free(instructions);
}
988
/* Print annotated disassembly for every currently-bound gfx shader, then
 * list any live waves whose PC does not fall inside a bound shader. */
static void si_dump_annotated_shaders(struct si_context *sctx, FILE *f)
{
	struct ac_wave_info waves[AC_MAX_WAVES_PER_CHIP];
	unsigned num_waves = ac_get_wave_info(waves);

	fprintf(f, COLOR_CYAN "The number of active waves = %u" COLOR_RESET
		"\n\n", num_waves);

	si_print_annotated_shader(sctx->vs_shader.current, waves, num_waves, f);
	si_print_annotated_shader(sctx->tcs_shader.current, waves, num_waves, f);
	si_print_annotated_shader(sctx->tes_shader.current, waves, num_waves, f);
	si_print_annotated_shader(sctx->gs_shader.current, waves, num_waves, f);
	si_print_annotated_shader(sctx->ps_shader.current, waves, num_waves, f);

	/* Print waves executing shaders that are not currently bound. */
	unsigned i;
	bool found = false;
	for (i = 0; i < num_waves; i++) {
		if (waves[i].matched)
			continue;

		if (!found) {
			fprintf(f, COLOR_CYAN
				"Waves not executing currently-bound shaders:"
				COLOR_RESET "\n");
			found = true;
		}
		fprintf(f, "    SE%u SH%u CU%u SIMD%u WAVE%u  EXEC=%016"PRIx64
			"  INST=%08X %08X  PC=%"PRIx64"\n",
			waves[i].se, waves[i].sh, waves[i].cu, waves[i].simd,
			waves[i].wave, waves[i].exec, waves[i].inst_dw0,
			waves[i].inst_dw1, waves[i].pc);
	}
	if (found)
		fprintf(f, "\n\n");
}
1025
1026 static void si_dump_command(const char *title, const char *command, FILE *f)
1027 {
1028 char line[2000];
1029
1030 FILE *p = popen(command, "r");
1031 if (!p)
1032 return;
1033
1034 fprintf(f, COLOR_YELLOW "%s: " COLOR_RESET "\n", title);
1035 while (fgets(line, sizeof(line), p))
1036 fputs(line, f);
1037 fprintf(f, "\n\n");
1038 pclose(p);
1039 }
1040
/* pipe_context::dump_debug_state entry point: flush the context log and,
 * when requested, dump status registers and annotated shader waves. */
static void si_dump_debug_state(struct pipe_context *ctx, FILE *f,
				unsigned flags)
{
	struct si_context *sctx = (struct si_context*)ctx;

	if (sctx->b.log)
		u_log_flush(sctx->b.log);

	if (flags & PIPE_DUMP_DEVICE_STATUS_REGISTERS) {
		si_dump_debug_registers(sctx, f);

		si_dump_annotated_shaders(sctx, f);
		/* "umr" is AMD's external register/wave debugging tool;
		 * these dumps are empty if it is not installed. */
		si_dump_command("Active waves (raw data)", "umr -wa | column -t", f);
		si_dump_command("Wave information", "umr -O bits -wa", f);
	}
}
1057
1058 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log)
1059 {
1060 if (!log)
1061 return;
1062
1063 si_dump_framebuffer(sctx, log);
1064
1065 si_dump_gfx_shader(sctx, &sctx->vs_shader, log);
1066 si_dump_gfx_shader(sctx, &sctx->tcs_shader, log);
1067 si_dump_gfx_shader(sctx, &sctx->tes_shader, log);
1068 si_dump_gfx_shader(sctx, &sctx->gs_shader, log);
1069 si_dump_gfx_shader(sctx, &sctx->ps_shader, log);
1070
1071 si_dump_descriptor_list(sctx->screen,
1072 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
1073 "", "RW buffers", 4, SI_NUM_RW_BUFFERS,
1074 si_identity, log);
1075 si_dump_gfx_descriptors(sctx, &sctx->vs_shader, log);
1076 si_dump_gfx_descriptors(sctx, &sctx->tcs_shader, log);
1077 si_dump_gfx_descriptors(sctx, &sctx->tes_shader, log);
1078 si_dump_gfx_descriptors(sctx, &sctx->gs_shader, log);
1079 si_dump_gfx_descriptors(sctx, &sctx->ps_shader, log);
1080 }
1081
/* Log the bound compute shader and its descriptors. No-op when \p log is
 * NULL.
 */
void si_log_compute_state(struct si_context *sctx, struct u_log_context *log)
{
	if (log) {
		si_dump_compute_shader(sctx, log);
		si_dump_compute_descriptors(sctx, log);
	}
}
1090
1091 static void si_dump_dma(struct si_context *sctx,
1092 struct radeon_saved_cs *saved, FILE *f)
1093 {
1094 static const char ib_name[] = "sDMA IB";
1095 unsigned i;
1096
1097 si_dump_bo_list(sctx, saved, f);
1098
1099 fprintf(f, "------------------ %s begin ------------------\n", ib_name);
1100
1101 for (i = 0; i < saved->num_dw; ++i) {
1102 fprintf(f, " %08x\n", saved->ib[i]);
1103 }
1104
1105 fprintf(f, "------------------- %s end -------------------\n", ib_name);
1106 fprintf(f, "\n");
1107
1108 fprintf(f, "SDMA Dump Done.\n");
1109 }
1110
1111 void si_check_vm_faults(struct r600_common_context *ctx,
1112 struct radeon_saved_cs *saved, enum ring_type ring)
1113 {
1114 struct si_context *sctx = (struct si_context *)ctx;
1115 struct pipe_screen *screen = sctx->b.b.screen;
1116 FILE *f;
1117 uint64_t addr;
1118 char cmd_line[4096];
1119
1120 if (!ac_vm_fault_occured(sctx->b.chip_class,
1121 &sctx->dmesg_timestamp, &addr))
1122 return;
1123
1124 f = dd_get_debug_file(false);
1125 if (!f)
1126 return;
1127
1128 fprintf(f, "VM fault report.\n\n");
1129 if (os_get_command_line(cmd_line, sizeof(cmd_line)))
1130 fprintf(f, "Command: %s\n", cmd_line);
1131 fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
1132 fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
1133 fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
1134 fprintf(f, "Failing VM page: 0x%08"PRIx64"\n\n", addr);
1135
1136 if (sctx->apitrace_call_number)
1137 fprintf(f, "Last apitrace call: %u\n\n",
1138 sctx->apitrace_call_number);
1139
1140 switch (ring) {
1141 case RING_GFX: {
1142 struct u_log_context log;
1143 u_log_context_init(&log);
1144
1145 si_log_draw_state(sctx, &log);
1146 si_log_compute_state(sctx, &log);
1147 si_log_cs(sctx, &log, true);
1148
1149 u_log_new_page_print(&log, f);
1150 u_log_context_destroy(&log);
1151 break;
1152 }
1153 case RING_DMA:
1154 si_dump_dma(sctx, saved, f);
1155 break;
1156
1157 default:
1158 break;
1159 }
1160
1161 fclose(f);
1162
1163 fprintf(stderr, "Detected a VM fault, exiting...\n");
1164 exit(0);
1165 }
1166
1167 void si_init_debug_functions(struct si_context *sctx)
1168 {
1169 sctx->b.b.dump_debug_state = si_dump_debug_state;
1170
1171 /* Set the initial dmesg timestamp for this context, so that
1172 * only new messages will be checked for VM faults.
1173 */
1174 if (sctx->screen->debug_flags & DBG(CHECK_VM))
1175 ac_vm_fault_occured(sctx->b.chip_class,
1176 &sctx->dmesg_timestamp, NULL);
1177 }