radeonsi: cull primitives with async compute for large draw calls
[mesa.git] / src / gallium / drivers / radeonsi / si_debug.c
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_pipe.h"
26 #include "si_compute.h"
27 #include "sid.h"
28 #include "gfx9d.h"
29 #include "sid_tables.h"
30 #include "driver_ddebug/dd_util.h"
31 #include "util/u_dump.h"
32 #include "util/u_log.h"
33 #include "util/u_memory.h"
34 #include "util/u_string.h"
35 #include "ac_debug.h"
36
37 static void si_dump_bo_list(struct si_context *sctx,
38 const struct radeon_saved_cs *saved, FILE *f);
39
40 DEBUG_GET_ONCE_OPTION(replace_shaders, "RADEON_REPLACE_SHADERS", NULL)
41
/**
 * Store a linearized copy of all chunks of \p cs together with the buffer
 * list in \p saved.
 *
 * On out-of-memory, \p saved is zeroed so that a later si_clear_saved_cs
 * on it is safe.
 */
void si_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
		struct radeon_saved_cs *saved, bool get_buffer_list)
{
	uint32_t *buf;
	unsigned i;

	/* Save the IB chunks. num_dw counts all previously-chained chunks
	 * plus the current one; 4 bytes per dword. */
	saved->num_dw = cs->prev_dw + cs->current.cdw;
	saved->ib = MALLOC(4 * saved->num_dw);
	if (!saved->ib)
		goto oom;

	/* Linearize: copy each flushed chunk in order, then the current one. */
	buf = saved->ib;
	for (i = 0; i < cs->num_prev; ++i) {
		memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
		buf += cs->prev[i].cdw;
	}
	memcpy(buf, cs->current.buf, cs->current.cdw * 4);

	if (!get_buffer_list)
		return;

	/* Save the buffer list: first query the count, then fill the array. */
	saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
	saved->bo_list = CALLOC(saved->bo_count,
				sizeof(saved->bo_list[0]));
	if (!saved->bo_list) {
		FREE(saved->ib);
		goto oom;
	}
	ws->cs_get_buffer_list(cs, saved->bo_list);

	return;

oom:
	fprintf(stderr, "%s: out of memory\n", __func__);
	memset(saved, 0, sizeof(*saved));
}
84
85 void si_clear_saved_cs(struct radeon_saved_cs *saved)
86 {
87 FREE(saved->ib);
88 FREE(saved->bo_list);
89
90 memset(saved, 0, sizeof(*saved));
91 }
92
93 void si_destroy_saved_cs(struct si_saved_cs *scs)
94 {
95 si_clear_saved_cs(&scs->gfx);
96 si_resource_reference(&scs->trace_buf, NULL);
97 free(scs);
98 }
99
100 static void si_dump_shader(struct si_screen *sscreen,
101 enum pipe_shader_type processor,
102 const struct si_shader *shader, FILE *f)
103 {
104 if (shader->shader_log)
105 fwrite(shader->shader_log, shader->shader_log_size, 1, f);
106 else
107 si_shader_dump(sscreen, shader, NULL, processor, f, false);
108 }
109
/* Log chunk payload holding a shader to be dumped lazily on print. */
struct si_log_chunk_shader {
	/* The shader destroy code assumes a current context for unlinking of
	 * PM4 packets etc.
	 *
	 * While we should be able to destroy shaders without a context, doing
	 * so would happen only very rarely and be therefore likely to fail
	 * just when you're trying to debug something. Let's just remember the
	 * current context in the chunk.
	 */
	struct si_context *ctx;
	struct si_shader *shader;
	/* Shader stage, passed through to si_dump_shader. */
	enum pipe_shader_type processor;

	/* For keep-alive reference counts; sel is set for gfx shaders,
	 * program for compute shaders (see si_dump_gfx_shader /
	 * si_dump_compute_shader). */
	struct si_shader_selector *sel;
	struct si_compute *program;
};
127
128 static void
129 si_log_chunk_shader_destroy(void *data)
130 {
131 struct si_log_chunk_shader *chunk = data;
132 si_shader_selector_reference(chunk->ctx, &chunk->sel, NULL);
133 si_compute_reference(&chunk->program, NULL);
134 FREE(chunk);
135 }
136
137 static void
138 si_log_chunk_shader_print(void *data, FILE *f)
139 {
140 struct si_log_chunk_shader *chunk = data;
141 struct si_screen *sscreen = chunk->ctx->screen;
142 si_dump_shader(sscreen, chunk->processor,
143 chunk->shader, f);
144 }
145
146 static struct u_log_chunk_type si_log_chunk_type_shader = {
147 .destroy = si_log_chunk_shader_destroy,
148 .print = si_log_chunk_shader_print,
149 };
150
151 static void si_dump_gfx_shader(struct si_context *ctx,
152 const struct si_shader_ctx_state *state,
153 struct u_log_context *log)
154 {
155 struct si_shader *current = state->current;
156
157 if (!state->cso || !current)
158 return;
159
160 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
161 chunk->ctx = ctx;
162 chunk->processor = state->cso->info.processor;
163 chunk->shader = current;
164 si_shader_selector_reference(ctx, &chunk->sel, current->selector);
165 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
166 }
167
168 static void si_dump_compute_shader(struct si_context *ctx,
169 struct u_log_context *log)
170 {
171 const struct si_cs_shader_state *state = &ctx->cs_shader_state;
172
173 if (!state->program)
174 return;
175
176 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
177 chunk->ctx = ctx;
178 chunk->processor = PIPE_SHADER_COMPUTE;
179 chunk->shader = &state->program->shader;
180 si_compute_reference(&chunk->program, state->program);
181 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
182 }
183
/**
 * Shader compiles can be overridden with arbitrary ELF objects by setting
 * the environment variable RADEON_REPLACE_SHADERS=num1:filename1[;num2:filename2]
 *
 * \param num     sequence number of the compiled shader
 * \param binary  receives the parsed replacement ELF on success
 * \return        true if the shader was replaced
 */
bool si_replace_shader(unsigned num, struct ac_shader_binary *binary)
{
	const char *p = debug_get_option_replace_shaders();
	const char *semicolon;
	char *copy = NULL;
	FILE *f;
	long filesize, nread;
	char *buf = NULL;
	bool replaced = false;

	if (!p)
		return false;

	/* Walk the "num:filename[;num:filename...]" list looking for num. */
	while (*p) {
		unsigned long i;
		char *endp;
		i = strtoul(p, &endp, 0);

		p = endp;
		if (*p != ':') {
			fprintf(stderr, "RADEON_REPLACE_SHADERS formatted badly.\n");
			exit(1);
		}
		++p;

		if (i == num)
			break;

		p = strchr(p, ';');
		if (!p)
			return false;
		++p;
	}
	if (!*p)
		return false;

	/* If more entries follow, isolate this entry's filename. */
	semicolon = strchr(p, ';');
	if (semicolon) {
		p = copy = strndup(p, semicolon - p);
		if (!copy) {
			fprintf(stderr, "out of memory\n");
			return false;
		}
	}

	fprintf(stderr, "radeonsi: replace shader %u by %s\n", num, p);

	/* Open in binary mode: the file is an ELF object, and text mode
	 * would corrupt it on platforms that translate line endings. */
	f = fopen(p, "rb");
	if (!f) {
		perror("radeonsi: failed to open file");
		goto out_free;
	}

	/* Determine the file size by seeking to the end. */
	if (fseek(f, 0, SEEK_END) != 0)
		goto file_error;

	filesize = ftell(f);
	if (filesize < 0)
		goto file_error;

	if (fseek(f, 0, SEEK_SET) != 0)
		goto file_error;

	buf = MALLOC(filesize);
	if (!buf) {
		fprintf(stderr, "out of memory\n");
		goto out_close;
	}

	nread = fread(buf, 1, filesize, f);
	if (nread != filesize)
		goto file_error;

	ac_elf_read(buf, filesize, binary);
	replaced = true;

out_close:
	fclose(f);
out_free:
	FREE(buf);
	free(copy);
	return replaced;

file_error:
	perror("radeonsi: reading shader");
	goto out_close;
}
275
276 /* Parsed IBs are difficult to read without colors. Use "less -R file" to
277 * read them, or use "aha -b -f file" to convert them to html.
278 */
279 #define COLOR_RESET "\033[0m"
280 #define COLOR_RED "\033[31m"
281 #define COLOR_GREEN "\033[1;32m"
282 #define COLOR_YELLOW "\033[1;33m"
283 #define COLOR_CYAN "\033[1;36m"
284
285 static void si_dump_mmapped_reg(struct si_context *sctx, FILE *f,
286 unsigned offset)
287 {
288 struct radeon_winsys *ws = sctx->ws;
289 uint32_t value;
290
291 if (ws->read_registers(ws, offset, 1, &value))
292 ac_dump_reg(f, sctx->chip_class, offset, value, ~0);
293 }
294
/* Dump GPU status registers (GRBM/SRBM/SDMA/CP) for hang diagnosis.
 * Availability depends on kernel support and DRM version. */
static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
{
	/* Register reads require kernel support. */
	if (!sctx->screen->info.has_read_registers_query)
		return;

	fprintf(f, "Memory-mapped registers:\n");
	si_dump_mmapped_reg(sctx, f, R_008010_GRBM_STATUS);

	/* No other registers can be read on DRM < 3.1.0. */
	if (sctx->screen->info.drm_major < 3 ||
	    sctx->screen->info.drm_minor < 1) {
		fprintf(f, "\n");
		return;
	}

	si_dump_mmapped_reg(sctx, f, R_008008_GRBM_STATUS2);
	si_dump_mmapped_reg(sctx, f, R_008014_GRBM_STATUS_SE0);
	si_dump_mmapped_reg(sctx, f, R_008018_GRBM_STATUS_SE1);
	si_dump_mmapped_reg(sctx, f, R_008038_GRBM_STATUS_SE2);
	si_dump_mmapped_reg(sctx, f, R_00803C_GRBM_STATUS_SE3);
	si_dump_mmapped_reg(sctx, f, R_00D034_SDMA0_STATUS_REG);
	si_dump_mmapped_reg(sctx, f, R_00D834_SDMA1_STATUS_REG);
	/* SRBM registers only exist up to GFX8. */
	if (sctx->chip_class <= GFX8) {
		si_dump_mmapped_reg(sctx, f, R_000E50_SRBM_STATUS);
		si_dump_mmapped_reg(sctx, f, R_000E4C_SRBM_STATUS2);
		si_dump_mmapped_reg(sctx, f, R_000E54_SRBM_STATUS3);
	}
	si_dump_mmapped_reg(sctx, f, R_008680_CP_STAT);
	si_dump_mmapped_reg(sctx, f, R_008674_CP_STALLED_STAT1);
	si_dump_mmapped_reg(sctx, f, R_008678_CP_STALLED_STAT2);
	si_dump_mmapped_reg(sctx, f, R_008670_CP_STALLED_STAT3);
	si_dump_mmapped_reg(sctx, f, R_008210_CP_CPC_STATUS);
	si_dump_mmapped_reg(sctx, f, R_008214_CP_CPC_BUSY_STAT);
	si_dump_mmapped_reg(sctx, f, R_008218_CP_CPC_STALLED_STAT1);
	si_dump_mmapped_reg(sctx, f, R_00821C_CP_CPF_STATUS);
	si_dump_mmapped_reg(sctx, f, R_008220_CP_CPF_BUSY_STAT);
	si_dump_mmapped_reg(sctx, f, R_008224_CP_CPF_STALLED_STAT1);
	fprintf(f, "\n");
}
334
/* Log chunk payload describing a range of the gfx IB (and optionally the
 * prim-discard compute IB) to be parsed and printed later. */
struct si_log_chunk_cs {
	struct si_context *ctx;
	struct si_saved_cs *cs;	/* referenced; released in the destroy callback */
	bool dump_bo_list;	/* also print the buffer list when set */
	/* Dword ranges [begin, end) covered by this chunk. */
	unsigned gfx_begin, gfx_end;
	unsigned compute_begin, compute_end;
};
342
343 static void si_log_chunk_type_cs_destroy(void *data)
344 {
345 struct si_log_chunk_cs *chunk = data;
346 si_saved_cs_reference(&chunk->cs, NULL);
347 free(chunk);
348 }
349
/* Parse and print the dword range [begin, end) of a not-yet-flushed IB.
 * The IB consists of the previously-chained chunks (cs->prev[]) followed by
 * the current chunk; the range is walked across chunk boundaries, rebasing
 * begin/end onto each successive chunk. */
static void si_parse_current_ib(FILE *f, struct radeon_cmdbuf *cs,
				unsigned begin, unsigned end,
				int *last_trace_id, unsigned trace_id_count,
				const char *name, enum chip_class chip_class)
{
	unsigned orig_end = end;

	assert(begin <= end);

	fprintf(f, "------------------ %s begin (dw = %u) ------------------\n",
		name, begin);

	for (unsigned prev_idx = 0; prev_idx < cs->num_prev; ++prev_idx) {
		struct radeon_cmdbuf_chunk *chunk = &cs->prev[prev_idx];

		/* Print the part of [begin, end) that overlaps this chunk. */
		if (begin < chunk->cdw) {
			ac_parse_ib_chunk(f, chunk->buf + begin,
					  MIN2(end, chunk->cdw) - begin,
					  last_trace_id, trace_id_count,
					  chip_class, NULL, NULL);
		}

		/* The range ends inside this chunk -> done. */
		if (end <= chunk->cdw)
			return;

		if (begin < chunk->cdw)
			fprintf(f, "\n---------- Next %s Chunk ----------\n\n",
				name);

		/* Rebase the range onto the next chunk. */
		begin -= MIN2(begin, chunk->cdw);
		end -= chunk->cdw;
	}

	assert(end <= cs->current.cdw);

	ac_parse_ib_chunk(f, cs->current.buf + begin, end - begin, last_trace_id,
			  trace_id_count, chip_class, NULL, NULL);

	fprintf(f, "------------------- %s end (dw = %u) -------------------\n\n",
		name, orig_end);
}
391
/* Print callback: parse the recorded IB ranges (and optionally the buffer
 * list) into the log file, annotated with the last-reached trace IDs read
 * back from the trace buffer. */
static void si_log_chunk_type_cs_print(void *data, FILE *f)
{
	struct si_log_chunk_cs *chunk = data;
	struct si_context *ctx = chunk->ctx;
	struct si_saved_cs *scs = chunk->cs;
	int last_trace_id = -1;
	int last_compute_trace_id = -1;

	/* We are expecting that the ddebug pipe has already
	 * waited for the context, so this buffer should be idle.
	 * If the GPU is hung, there is no point in waiting for it.
	 */
	uint32_t *map = ctx->ws->buffer_map(scs->trace_buf->buf,
					    NULL,
					    PIPE_TRANSFER_UNSYNCHRONIZED |
					    PIPE_TRANSFER_READ);
	if (map) {
		/* dw0 = last gfx trace id, dw1 = last compute trace id. */
		last_trace_id = map[0];
		last_compute_trace_id = map[1];
	}

	if (chunk->gfx_end != chunk->gfx_begin) {
		/* The chunk that starts at dw 0 also prints the init-config
		 * preamble IB2s. */
		if (chunk->gfx_begin == 0) {
			if (ctx->init_config)
				ac_parse_ib(f, ctx->init_config->pm4, ctx->init_config->ndw,
					    NULL, 0, "IB2: Init config", ctx->chip_class,
					    NULL, NULL);

			if (ctx->init_config_gs_rings)
				ac_parse_ib(f, ctx->init_config_gs_rings->pm4,
					    ctx->init_config_gs_rings->ndw,
					    NULL, 0, "IB2: Init GS rings", ctx->chip_class,
					    NULL, NULL);
		}

		/* Flushed IBs are parsed from the linearized saved copy;
		 * otherwise walk the live command stream. */
		if (scs->flushed) {
			ac_parse_ib(f, scs->gfx.ib + chunk->gfx_begin,
				    chunk->gfx_end - chunk->gfx_begin,
				    &last_trace_id, map ? 1 : 0, "IB", ctx->chip_class,
				    NULL, NULL);
		} else {
			si_parse_current_ib(f, ctx->gfx_cs, chunk->gfx_begin,
					    chunk->gfx_end, &last_trace_id, map ? 1 : 0,
					    "IB", ctx->chip_class);
		}
	}

	/* Same for the prim-discard compute IB, if any was recorded. */
	if (chunk->compute_end != chunk->compute_begin) {
		assert(ctx->prim_discard_compute_cs);

		if (scs->flushed) {
			ac_parse_ib(f, scs->compute.ib + chunk->compute_begin,
				    chunk->compute_end - chunk->compute_begin,
				    &last_compute_trace_id, map ? 1 : 0, "Compute IB", ctx->chip_class,
				    NULL, NULL);
		} else {
			si_parse_current_ib(f, ctx->prim_discard_compute_cs, chunk->compute_begin,
					    chunk->compute_end, &last_compute_trace_id,
					    map ? 1 : 0, "Compute IB", ctx->chip_class);
		}
	}

	if (chunk->dump_bo_list) {
		fprintf(f, "Flushing. Time: ");
		util_dump_ns(f, scs->time_flush);
		fprintf(f, "\n\n");
		si_dump_bo_list(ctx, &scs->gfx, f);
	}
}
461
/* Log chunk vtable for command-stream dumps. */
static const struct u_log_chunk_type si_log_chunk_type_cs = {
	.destroy = si_log_chunk_type_cs_destroy,
	.print = si_log_chunk_type_cs_print,
};
466
467 static void si_log_cs(struct si_context *ctx, struct u_log_context *log,
468 bool dump_bo_list)
469 {
470 assert(ctx->current_saved_cs);
471
472 struct si_saved_cs *scs = ctx->current_saved_cs;
473 unsigned gfx_cur = ctx->gfx_cs->prev_dw + ctx->gfx_cs->current.cdw;
474 unsigned compute_cur = 0;
475
476 if (ctx->prim_discard_compute_cs)
477 compute_cur = ctx->prim_discard_compute_cs->prev_dw + ctx->prim_discard_compute_cs->current.cdw;
478
479 if (!dump_bo_list &&
480 gfx_cur == scs->gfx_last_dw &&
481 compute_cur == scs->compute_last_dw)
482 return;
483
484 struct si_log_chunk_cs *chunk = calloc(1, sizeof(*chunk));
485
486 chunk->ctx = ctx;
487 si_saved_cs_reference(&chunk->cs, scs);
488 chunk->dump_bo_list = dump_bo_list;
489
490 chunk->gfx_begin = scs->gfx_last_dw;
491 chunk->gfx_end = gfx_cur;
492 scs->gfx_last_dw = gfx_cur;
493
494 chunk->compute_begin = scs->compute_last_dw;
495 chunk->compute_end = compute_cur;
496 scs->compute_last_dw = compute_cur;
497
498 u_log_chunk(log, &si_log_chunk_type_cs, chunk);
499 }
500
501 void si_auto_log_cs(void *data, struct u_log_context *log)
502 {
503 struct si_context *ctx = (struct si_context *)data;
504 si_log_cs(ctx, log, false);
505 }
506
507 void si_log_hw_flush(struct si_context *sctx)
508 {
509 if (!sctx->log)
510 return;
511
512 si_log_cs(sctx, sctx->log, true);
513
514 if (&sctx->b == sctx->screen->aux_context) {
515 /* The aux context isn't captured by the ddebug wrapper,
516 * so we dump it on a flush-by-flush basis here.
517 */
518 FILE *f = dd_get_debug_file(false);
519 if (!f) {
520 fprintf(stderr, "radeonsi: error opening aux context dump file.\n");
521 } else {
522 dd_write_header(f, &sctx->screen->b, 0);
523
524 fprintf(f, "Aux context dump:\n\n");
525 u_log_new_page_print(sctx->log, f);
526
527 fclose(f);
528 }
529 }
530 }
531
532 static const char *priority_to_string(enum radeon_bo_priority priority)
533 {
534 #define ITEM(x) [RADEON_PRIO_##x] = #x
535 static const char *table[64] = {
536 ITEM(FENCE),
537 ITEM(TRACE),
538 ITEM(SO_FILLED_SIZE),
539 ITEM(QUERY),
540 ITEM(IB1),
541 ITEM(IB2),
542 ITEM(DRAW_INDIRECT),
543 ITEM(INDEX_BUFFER),
544 ITEM(CP_DMA),
545 ITEM(CONST_BUFFER),
546 ITEM(DESCRIPTORS),
547 ITEM(BORDER_COLORS),
548 ITEM(SAMPLER_BUFFER),
549 ITEM(VERTEX_BUFFER),
550 ITEM(SHADER_RW_BUFFER),
551 ITEM(COMPUTE_GLOBAL),
552 ITEM(SAMPLER_TEXTURE),
553 ITEM(SHADER_RW_IMAGE),
554 ITEM(SAMPLER_TEXTURE_MSAA),
555 ITEM(COLOR_BUFFER),
556 ITEM(DEPTH_BUFFER),
557 ITEM(COLOR_BUFFER_MSAA),
558 ITEM(DEPTH_BUFFER_MSAA),
559 ITEM(SEPARATE_META),
560 ITEM(SHADER_BINARY),
561 ITEM(SHADER_RINGS),
562 ITEM(SCRATCH_BUFFER),
563 };
564 #undef ITEM
565
566 assert(priority < ARRAY_SIZE(table));
567 return table[priority];
568 }
569
570 static int bo_list_compare_va(const struct radeon_bo_list_item *a,
571 const struct radeon_bo_list_item *b)
572 {
573 return a->vm_address < b->vm_address ? -1 :
574 a->vm_address > b->vm_address ? 1 : 0;
575 }
576
/* Print the saved buffer list sorted by VM address, one line per buffer
 * (size, start/end page, priority usage), with holes between buffers shown
 * explicitly. */
static void si_dump_bo_list(struct si_context *sctx,
			    const struct radeon_saved_cs *saved, FILE *f)
{
	unsigned i,j;

	if (!saved->bo_list)
		return;

	/* Sort the list according to VM addresses first. */
	qsort(saved->bo_list, saved->bo_count,
	      sizeof(saved->bo_list[0]), (void*)bo_list_compare_va);

	fprintf(f, "Buffer list (in units of pages = 4kB):\n"
		COLOR_YELLOW "        Size    VM start page         "
		"VM end page           Usage" COLOR_RESET "\n");

	for (i = 0; i < saved->bo_count; i++) {
		/* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
		const unsigned page_size = sctx->screen->info.gart_page_size;
		uint64_t va = saved->bo_list[i].vm_address;
		uint64_t size = saved->bo_list[i].bo_size;
		bool hit = false;

		/* If there's unused virtual memory between 2 buffers, print it. */
		if (i) {
			uint64_t previous_va_end = saved->bo_list[i-1].vm_address +
						   saved->bo_list[i-1].bo_size;

			if (va > previous_va_end) {
				fprintf(f, "  %10"PRIu64"    -------------------------------------- hole\n",
					(va - previous_va_end) / page_size);
			}
		}

		/* Print the buffer. */
		fprintf(f, "  %10"PRIu64"    0x%013"PRIX64"       0x%013"PRIX64"       ",
			size / page_size, va / page_size, (va + size) / page_size);

		/* Print the usage: one name per priority bit set. */
		for (j = 0; j < 32; j++) {
			if (!(saved->bo_list[i].priority_usage & (1u << j)))
				continue;

			fprintf(f, "%s%s", !hit ? "" : ", ", priority_to_string(j));
			hit = true;
		}
		fprintf(f, "\n");
	}
	fprintf(f, "\nNote: The holes represent memory not used by the IB.\n"
		   "      Other buffers can still be allocated there.\n\n");
}
628
629 static void si_dump_framebuffer(struct si_context *sctx, struct u_log_context *log)
630 {
631 struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
632 struct si_texture *tex;
633 int i;
634
635 for (i = 0; i < state->nr_cbufs; i++) {
636 if (!state->cbufs[i])
637 continue;
638
639 tex = (struct si_texture*)state->cbufs[i]->texture;
640 u_log_printf(log, COLOR_YELLOW "Color buffer %i:" COLOR_RESET "\n", i);
641 si_print_texture_info(sctx->screen, tex, log);
642 u_log_printf(log, "\n");
643 }
644
645 if (state->zsbuf) {
646 tex = (struct si_texture*)state->zsbuf->texture;
647 u_log_printf(log, COLOR_YELLOW "Depth-stencil buffer:" COLOR_RESET "\n");
648 si_print_texture_info(sctx->screen, tex, log);
649 u_log_printf(log, "\n");
650 }
651 }
652
653 typedef unsigned (*slot_remap_func)(unsigned);
654
/* Log chunk payload holding a CPU snapshot of a descriptor list, plus a
 * mapping of the uploaded GPU copy for corruption detection. */
struct si_log_chunk_desc_list {
	/** Pointer to memory map of buffer where the list is uploaded */
	uint32_t *gpu_list;
	/** Reference of buffer where the list is uploaded, so that gpu_list
	 * is kept live. */
	struct si_resource *buf;

	const char *shader_name;
	const char *elem_name;
	slot_remap_func slot_remap;	/* maps element index -> GPU slot */
	enum chip_class chip_class;
	unsigned element_dw_size;	/* dwords per element: 4, 8, or 16 */
	unsigned num_elements;

	/* CPU copy of the list follows (num_elements * element_dw_size dw). */
	uint32_t list[0];
};
671
672 static void
673 si_log_chunk_desc_list_destroy(void *data)
674 {
675 struct si_log_chunk_desc_list *chunk = data;
676 si_resource_reference(&chunk->buf, NULL);
677 FREE(chunk);
678 }
679
/* Print callback: decode each descriptor slot from the GPU copy if mapped
 * (falling back to the CPU copy), and flag slots whose GPU contents differ
 * from the CPU snapshot as corrupted. */
static void
si_log_chunk_desc_list_print(void *data, FILE *f)
{
	struct si_log_chunk_desc_list *chunk = data;

	for (unsigned i = 0; i < chunk->num_elements; i++) {
		unsigned cpu_dw_offset = i * chunk->element_dw_size;
		unsigned gpu_dw_offset = chunk->slot_remap(i) * chunk->element_dw_size;
		const char *list_note = chunk->gpu_list ? "GPU list" : "CPU list";
		uint32_t *cpu_list = chunk->list + cpu_dw_offset;
		uint32_t *gpu_list = chunk->gpu_list ? chunk->gpu_list + gpu_dw_offset : cpu_list;

		fprintf(f, COLOR_GREEN "%s%s slot %u (%s):" COLOR_RESET "\n",
			chunk->shader_name, chunk->elem_name, i, list_note);

		/* Decode by element size:
		 *  4 dw = buffer resource,
		 *  8 dw = image resource + buffer view,
		 * 16 dw = image + buffer view + FMASK + sampler state. */
		switch (chunk->element_dw_size) {
		case 4:
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);
			break;
		case 8:
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Buffer:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[4+j], 0xffffffff);
			break;
		case 16:
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Buffer:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
					    gpu_list[4+j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    FMASK:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 8; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
					    gpu_list[8+j], 0xffffffff);

			fprintf(f, COLOR_CYAN "    Sampler state:" COLOR_RESET "\n");
			for (unsigned j = 0; j < 4; j++)
				ac_dump_reg(f, chunk->chip_class,
					    R_008F30_SQ_IMG_SAMP_WORD0 + j*4,
					    gpu_list[12+j], 0xffffffff);
			break;
		}

		/* GPU copy diverging from the CPU snapshot means the
		 * descriptor memory was overwritten on the GPU. */
		if (memcmp(gpu_list, cpu_list, chunk->element_dw_size * 4) != 0) {
			fprintf(f, COLOR_RED "!!!!! This slot was corrupted in GPU memory !!!!!"
				COLOR_RESET "\n");
		}

		fprintf(f, "\n");
	}

}
749
/* Log chunk vtable for descriptor-list dumps. */
static const struct u_log_chunk_type si_log_chunk_type_descriptor_list = {
	.destroy = si_log_chunk_desc_list_destroy,
	.print = si_log_chunk_desc_list_print,
};
754
755 static void si_dump_descriptor_list(struct si_screen *screen,
756 struct si_descriptors *desc,
757 const char *shader_name,
758 const char *elem_name,
759 unsigned element_dw_size,
760 unsigned num_elements,
761 slot_remap_func slot_remap,
762 struct u_log_context *log)
763 {
764 if (!desc->list)
765 return;
766
767 /* In some cases, the caller doesn't know how many elements are really
768 * uploaded. Reduce num_elements to fit in the range of active slots. */
769 unsigned active_range_dw_begin =
770 desc->first_active_slot * desc->element_dw_size;
771 unsigned active_range_dw_end =
772 active_range_dw_begin + desc->num_active_slots * desc->element_dw_size;
773
774 while (num_elements > 0) {
775 int i = slot_remap(num_elements - 1);
776 unsigned dw_begin = i * element_dw_size;
777 unsigned dw_end = dw_begin + element_dw_size;
778
779 if (dw_begin >= active_range_dw_begin && dw_end <= active_range_dw_end)
780 break;
781
782 num_elements--;
783 }
784
785 struct si_log_chunk_desc_list *chunk =
786 CALLOC_VARIANT_LENGTH_STRUCT(si_log_chunk_desc_list,
787 4 * element_dw_size * num_elements);
788 chunk->shader_name = shader_name;
789 chunk->elem_name = elem_name;
790 chunk->element_dw_size = element_dw_size;
791 chunk->num_elements = num_elements;
792 chunk->slot_remap = slot_remap;
793 chunk->chip_class = screen->info.chip_class;
794
795 si_resource_reference(&chunk->buf, desc->buffer);
796 chunk->gpu_list = desc->gpu_list;
797
798 for (unsigned i = 0; i < num_elements; ++i) {
799 memcpy(&chunk->list[i * element_dw_size],
800 &desc->list[slot_remap(i) * element_dw_size],
801 4 * element_dw_size);
802 }
803
804 u_log_chunk(log, &si_log_chunk_type_descriptor_list, chunk);
805 }
806
/* Trivial slot remap: descriptor slot i maps to itself. */
static unsigned si_identity(unsigned slot)
{
	unsigned remapped = slot;
	return remapped;
}
811
/* Dump all descriptor lists of one shader stage: vertex buffers (VS only),
 * constant buffers, shader buffers, samplers and images.
 *
 * \param info  TGSI info used to derive the enabled-slot masks; NULL for
 *              compute, in which case the masks come from the context's
 *              binding state instead.
 */
static void si_dump_descriptors(struct si_context *sctx,
				enum pipe_shader_type processor,
				const struct tgsi_shader_info *info,
				struct u_log_context *log)
{
	struct si_descriptors *descs =
		&sctx->descriptors[SI_DESCS_FIRST_SHADER +
				   processor * SI_NUM_SHADER_DESCS];
	static const char *shader_name[] = {"VS", "PS", "GS", "TCS", "TES", "CS"};
	const char *name = shader_name[processor];
	unsigned enabled_constbuf, enabled_shaderbuf, enabled_samplers;
	unsigned enabled_images;

	if (info) {
		enabled_constbuf = info->const_buffers_declared;
		enabled_shaderbuf = info->shader_buffers_declared;
		enabled_samplers = info->samplers_declared;
		enabled_images = info->images_declared;
	} else {
		/* No TGSI info: derive the masks from the binding state.
		 * The combined const/shader-buffer mask stores constbufs in
		 * the high bits and shaderbufs bit-reversed in the low bits. */
		enabled_constbuf = sctx->const_and_shader_buffers[processor].enabled_mask >>
				   SI_NUM_SHADER_BUFFERS;
		enabled_shaderbuf = sctx->const_and_shader_buffers[processor].enabled_mask &
				    u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);
		enabled_shaderbuf = util_bitreverse(enabled_shaderbuf) >>
				    (32 - SI_NUM_SHADER_BUFFERS);
		enabled_samplers = sctx->samplers[processor].enabled_mask;
		enabled_images = sctx->images[processor].enabled_mask;
	}

	/* Vertex buffer descriptors live in their own upload buffer. */
	if (processor == PIPE_SHADER_VERTEX &&
	    sctx->vb_descriptors_buffer &&
	    sctx->vb_descriptors_gpu_list &&
	    sctx->vertex_elements) {
		assert(info); /* only CS may not have an info struct */
		struct si_descriptors desc = {};

		desc.buffer = sctx->vb_descriptors_buffer;
		desc.list = sctx->vb_descriptors_gpu_list;
		desc.gpu_list = sctx->vb_descriptors_gpu_list;
		desc.element_dw_size = 4;
		desc.num_active_slots = sctx->vertex_elements->desc_list_byte_size / 16;

		si_dump_descriptor_list(sctx->screen, &desc, name,
					" - Vertex buffer", 4, info->num_inputs,
					si_identity, log);
	}

	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS],
				name, " - Constant buffer", 4,
				util_last_bit(enabled_constbuf),
				si_get_constbuf_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS],
				name, " - Shader buffer", 4,
				util_last_bit(enabled_shaderbuf),
				si_get_shaderbuf_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES],
				name, " - Sampler", 16,
				util_last_bit(enabled_samplers),
				si_get_sampler_slot, log);
	si_dump_descriptor_list(sctx->screen,
				&descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES],
				name, " - Image", 8,
				util_last_bit(enabled_images),
				si_get_image_slot, log);
}
880
881 static void si_dump_gfx_descriptors(struct si_context *sctx,
882 const struct si_shader_ctx_state *state,
883 struct u_log_context *log)
884 {
885 if (!state->cso || !state->current)
886 return;
887
888 si_dump_descriptors(sctx, state->cso->type, &state->cso->info, log);
889 }
890
891 static void si_dump_compute_descriptors(struct si_context *sctx,
892 struct u_log_context *log)
893 {
894 if (!sctx->cs_shader_state.program)
895 return;
896
897 si_dump_descriptors(sctx, PIPE_SHADER_COMPUTE, NULL, log);
898 }
899
/* One decoded disassembly line, used for annotating live waves. */
struct si_shader_inst {
	const char *text; /* start of disassembly for this instruction */
	unsigned textlen; /* length up to (not including) the newline */
	unsigned size; /* instruction size = 4 or 8 */
	uint64_t addr; /* instruction address */
};
906
/**
 * Split a disassembly string into instructions and add them to the array
 * pointed to by \p instructions.
 *
 * Labels are considered to be part of the following instruction.
 *
 * \param disasm        NUL-terminated disassembly; one instruction per line,
 *                      each followed by a ';' comment
 * \param addr          in/out: address assigned to the next instruction
 * \param num           in/out: number of entries filled in \p instructions
 * \param instructions  output array; must be large enough for all lines
 */
static void si_add_split_disasm(const char *disasm,
				uint64_t *addr,
				unsigned *num,
				struct si_shader_inst *instructions)
{
	const char *semicolon;

	/* Each instruction line contains a ';'; lines without one (e.g.
	 * trailing text) end the loop. */
	while ((semicolon = strchr(disasm, ';'))) {
		struct si_shader_inst *inst = &instructions[(*num)++];
		const char *end = util_strchrnul(semicolon, '\n');

		inst->text = disasm;
		inst->textlen = end - disasm;

		inst->addr = *addr;
		/* More than 16 chars after ";" means the instruction is 8 bytes long. */
		inst->size = end - semicolon > 16 ? 8 : 4;
		*addr += inst->size;

		if (!(*end))
			break;
		disasm = end + 1;
	}
}
937
/* If the shader is being executed, print its asm instructions, and annotate
 * those that are being executed right now with information about waves that
 * execute them. This is most useful during a GPU hang.
 *
 * \param waves      wave list sorted by PC; entries matched here are marked
 * \param num_waves  number of entries in \p waves
 */
static void si_print_annotated_shader(struct si_shader *shader,
				      struct ac_wave_info *waves,
				      unsigned num_waves,
				      FILE *f)
{
	if (!shader || !shader->binary.disasm_string)
		return;

	/* Address range the shader's code occupies in GPU memory. */
	uint64_t start_addr = shader->bo->gpu_address;
	uint64_t end_addr = start_addr + shader->bo->b.b.width0;
	unsigned i;

	/* See if any wave executes the shader. */
	for (i = 0; i < num_waves; i++) {
		if (start_addr <= waves[i].pc && waves[i].pc <= end_addr)
			break;
	}
	if (i == num_waves)
		return; /* the shader is not being executed */

	/* Remember the first found wave. The waves are sorted according to PC. */
	waves = &waves[i];
	num_waves -= i;

	/* Get the list of instructions.
	 * Buffer size / 4 is the upper bound of the instruction count.
	 */
	unsigned num_inst = 0;
	uint64_t inst_addr = start_addr;
	struct si_shader_inst *instructions =
		calloc(shader->bo->b.b.width0 / 4, sizeof(struct si_shader_inst));

	/* Split all shader parts in upload order: prologs, previous stage,
	 * main body, epilog. inst_addr advances across parts. */
	if (shader->prolog) {
		si_add_split_disasm(shader->prolog->binary.disasm_string,
				    &inst_addr, &num_inst, instructions);
	}
	if (shader->previous_stage) {
		si_add_split_disasm(shader->previous_stage->binary.disasm_string,
				    &inst_addr, &num_inst, instructions);
	}
	if (shader->prolog2) {
		si_add_split_disasm(shader->prolog2->binary.disasm_string,
				    &inst_addr, &num_inst, instructions);
	}
	si_add_split_disasm(shader->binary.disasm_string,
			    &inst_addr, &num_inst, instructions);
	if (shader->epilog) {
		si_add_split_disasm(shader->epilog->binary.disasm_string,
				    &inst_addr, &num_inst, instructions);
	}

	fprintf(f, COLOR_YELLOW "%s - annotated disassembly:" COLOR_RESET "\n",
		si_get_shader_name(shader, shader->selector->type));

	/* Print instructions with annotations. */
	for (i = 0; i < num_inst; i++) {
		struct si_shader_inst *inst = &instructions[i];

		fprintf(f, "%.*s [PC=0x%"PRIx64", size=%u]\n",
			inst->textlen, inst->text, inst->addr, inst->size);

		/* Print which waves execute the instruction right now. */
		while (num_waves && inst->addr == waves->pc) {
			fprintf(f,
				"          " COLOR_GREEN "^ SE%u SH%u CU%u "
				"SIMD%u WAVE%u  EXEC=%016"PRIx64 "  ",
				waves->se, waves->sh, waves->cu, waves->simd,
				waves->wave, waves->exec);

			if (inst->size == 4) {
				fprintf(f, "INST32=%08X" COLOR_RESET "\n",
					waves->inst_dw0);
			} else {
				fprintf(f, "INST64=%08X %08X" COLOR_RESET "\n",
					waves->inst_dw0, waves->inst_dw1);
			}

			/* Mark the wave so it isn't listed again under
			 * "waves not executing currently-bound shaders". */
			waves->matched = true;
			waves = &waves[1];
			num_waves--;
		}
	}

	fprintf(f, "\n\n");
	free(instructions);
}
1028
/* Query all active waves and print annotated disassembly of every bound
 * gfx shader, plus a raw listing of waves that match no bound shader. */
static void si_dump_annotated_shaders(struct si_context *sctx, FILE *f)
{
	struct ac_wave_info waves[AC_MAX_WAVES_PER_CHIP];
	unsigned num_waves = ac_get_wave_info(waves);

	fprintf(f, COLOR_CYAN "The number of active waves = %u" COLOR_RESET
		"\n\n", num_waves);

	si_print_annotated_shader(sctx->vs_shader.current, waves, num_waves, f);
	si_print_annotated_shader(sctx->tcs_shader.current, waves, num_waves, f);
	si_print_annotated_shader(sctx->tes_shader.current, waves, num_waves, f);
	si_print_annotated_shader(sctx->gs_shader.current, waves, num_waves, f);
	si_print_annotated_shader(sctx->ps_shader.current, waves, num_waves, f);

	/* Print waves executing shaders that are not currently bound. */
	unsigned i;
	bool found = false;
	for (i = 0; i < num_waves; i++) {
		/* matched was set by si_print_annotated_shader above. */
		if (waves[i].matched)
			continue;

		if (!found) {
			fprintf(f, COLOR_CYAN
				"Waves not executing currently-bound shaders:"
				COLOR_RESET "\n");
			found = true;
		}
		fprintf(f, "    SE%u SH%u CU%u SIMD%u WAVE%u  EXEC=%016"PRIx64
			"  INST=%08X %08X  PC=%"PRIx64"\n",
			waves[i].se, waves[i].sh, waves[i].cu, waves[i].simd,
			waves[i].wave, waves[i].exec, waves[i].inst_dw0,
			waves[i].inst_dw1, waves[i].pc);
	}
	if (found)
		fprintf(f, "\n\n");
}
1065
1066 static void si_dump_command(const char *title, const char *command, FILE *f)
1067 {
1068 char line[2000];
1069
1070 FILE *p = popen(command, "r");
1071 if (!p)
1072 return;
1073
1074 fprintf(f, COLOR_YELLOW "%s: " COLOR_RESET "\n", title);
1075 while (fgets(line, sizeof(line), p))
1076 fputs(line, f);
1077 fprintf(f, "\n\n");
1078 pclose(p);
1079 }
1080
1081 static void si_dump_debug_state(struct pipe_context *ctx, FILE *f,
1082 unsigned flags)
1083 {
1084 struct si_context *sctx = (struct si_context*)ctx;
1085
1086 if (sctx->log)
1087 u_log_flush(sctx->log);
1088
1089 if (flags & PIPE_DUMP_DEVICE_STATUS_REGISTERS) {
1090 si_dump_debug_registers(sctx, f);
1091
1092 si_dump_annotated_shaders(sctx, f);
1093 si_dump_command("Active waves (raw data)", "umr -O halt_waves -wa | column -t", f);
1094 si_dump_command("Wave information", "umr -O halt_waves,bits -wa", f);
1095 }
1096 }
1097
1098 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log)
1099 {
1100 struct si_shader_ctx_state *tcs_shader;
1101
1102 if (!log)
1103 return;
1104
1105 tcs_shader = &sctx->tcs_shader;
1106 if (sctx->tes_shader.cso && !sctx->tcs_shader.cso)
1107 tcs_shader = &sctx->fixed_func_tcs_shader;
1108
1109 si_dump_framebuffer(sctx, log);
1110
1111 si_dump_gfx_shader(sctx, &sctx->vs_shader, log);
1112 si_dump_gfx_shader(sctx, tcs_shader, log);
1113 si_dump_gfx_shader(sctx, &sctx->tes_shader, log);
1114 si_dump_gfx_shader(sctx, &sctx->gs_shader, log);
1115 si_dump_gfx_shader(sctx, &sctx->ps_shader, log);
1116
1117 si_dump_descriptor_list(sctx->screen,
1118 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
1119 "", "RW buffers", 4,
1120 sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots,
1121 si_identity, log);
1122 si_dump_gfx_descriptors(sctx, &sctx->vs_shader, log);
1123 si_dump_gfx_descriptors(sctx, tcs_shader, log);
1124 si_dump_gfx_descriptors(sctx, &sctx->tes_shader, log);
1125 si_dump_gfx_descriptors(sctx, &sctx->gs_shader, log);
1126 si_dump_gfx_descriptors(sctx, &sctx->ps_shader, log);
1127 }
1128
/* Log the currently-bound compute shader and its descriptors.
 * No-op when \p log is NULL. */
void si_log_compute_state(struct si_context *sctx, struct u_log_context *log)
{
	if (log) {
		si_dump_compute_shader(sctx, log);
		si_dump_compute_descriptors(sctx, log);
	}
}
1137
1138 static void si_dump_dma(struct si_context *sctx,
1139 struct radeon_saved_cs *saved, FILE *f)
1140 {
1141 static const char ib_name[] = "sDMA IB";
1142 unsigned i;
1143
1144 si_dump_bo_list(sctx, saved, f);
1145
1146 fprintf(f, "------------------ %s begin ------------------\n", ib_name);
1147
1148 for (i = 0; i < saved->num_dw; ++i) {
1149 fprintf(f, " %08x\n", saved->ib[i]);
1150 }
1151
1152 fprintf(f, "------------------- %s end -------------------\n", ib_name);
1153 fprintf(f, "\n");
1154
1155 fprintf(f, "SDMA Dump Done.\n");
1156 }
1157
1158 void si_check_vm_faults(struct si_context *sctx,
1159 struct radeon_saved_cs *saved, enum ring_type ring)
1160 {
1161 struct pipe_screen *screen = sctx->b.screen;
1162 FILE *f;
1163 uint64_t addr;
1164 char cmd_line[4096];
1165
1166 if (!ac_vm_fault_occured(sctx->chip_class,
1167 &sctx->dmesg_timestamp, &addr))
1168 return;
1169
1170 f = dd_get_debug_file(false);
1171 if (!f)
1172 return;
1173
1174 fprintf(f, "VM fault report.\n\n");
1175 if (os_get_command_line(cmd_line, sizeof(cmd_line)))
1176 fprintf(f, "Command: %s\n", cmd_line);
1177 fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
1178 fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
1179 fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
1180 fprintf(f, "Failing VM page: 0x%08"PRIx64"\n\n", addr);
1181
1182 if (sctx->apitrace_call_number)
1183 fprintf(f, "Last apitrace call: %u\n\n",
1184 sctx->apitrace_call_number);
1185
1186 switch (ring) {
1187 case RING_GFX: {
1188 struct u_log_context log;
1189 u_log_context_init(&log);
1190
1191 si_log_draw_state(sctx, &log);
1192 si_log_compute_state(sctx, &log);
1193 si_log_cs(sctx, &log, true);
1194
1195 u_log_new_page_print(&log, f);
1196 u_log_context_destroy(&log);
1197 break;
1198 }
1199 case RING_DMA:
1200 si_dump_dma(sctx, saved, f);
1201 break;
1202
1203 default:
1204 break;
1205 }
1206
1207 fclose(f);
1208
1209 fprintf(stderr, "Detected a VM fault, exiting...\n");
1210 exit(0);
1211 }
1212
1213 void si_init_debug_functions(struct si_context *sctx)
1214 {
1215 sctx->b.dump_debug_state = si_dump_debug_state;
1216
1217 /* Set the initial dmesg timestamp for this context, so that
1218 * only new messages will be checked for VM faults.
1219 */
1220 if (sctx->screen->debug_flags & DBG(CHECK_VM))
1221 ac_vm_fault_occured(sctx->chip_class,
1222 &sctx->dmesg_timestamp, NULL);
1223 }