ac: add radeon_info::is_amdgpu instead of checking drm_major == 3
[mesa.git] src/gallium/drivers/radeonsi/si_debug.c
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_pipe.h"
26 #include "si_compute.h"
27 #include "sid.h"
28 #include "sid_tables.h"
29 #include "driver_ddebug/dd_util.h"
30 #include "util/u_dump.h"
31 #include "util/u_log.h"
32 #include "util/u_memory.h"
33 #include "util/u_string.h"
34 #include "ac_debug.h"
35 #include "ac_rtld.h"
36
37 static void si_dump_bo_list(struct si_context *sctx,
38 const struct radeon_saved_cs *saved, FILE *f);
39
40 DEBUG_GET_ONCE_OPTION(replace_shaders, "RADEON_REPLACE_SHADERS", NULL)
41
42 /**
43 * Store a linearized copy of all chunks of \p cs together with the buffer
44 * list in \p saved.
45 */
46 void si_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
47 struct radeon_saved_cs *saved, bool get_buffer_list)
48 {
49 uint32_t *buf;
50 unsigned i;
51
52 /* Save the IB chunks. */
53 saved->num_dw = cs->prev_dw + cs->current.cdw;
54 saved->ib = MALLOC(4 * saved->num_dw);
55 if (!saved->ib)
56 goto oom;
57
58 buf = saved->ib;
59 for (i = 0; i < cs->num_prev; ++i) {
60 memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
61 buf += cs->prev[i].cdw;
62 }
63 memcpy(buf, cs->current.buf, cs->current.cdw * 4);
64
65 if (!get_buffer_list)
66 return;
67
68 /* Save the buffer list. */
69 saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
70 saved->bo_list = CALLOC(saved->bo_count,
71 sizeof(saved->bo_list[0]));
72 if (!saved->bo_list) {
73 FREE(saved->ib);
74 goto oom;
75 }
76 ws->cs_get_buffer_list(cs, saved->bo_list);
77
78 return;
79
80 oom:
81 fprintf(stderr, "%s: out of memory\n", __func__);
82 memset(saved, 0, sizeof(*saved));
83 }
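
/* A minimal usage sketch (illustrative only, assuming a winsys "ws" and a
 * command buffer "cs" are in scope):
 *
 *    struct radeon_saved_cs saved;
 *    si_save_cs(ws, cs, &saved, true);
 *    ... inspect saved.ib, saved.num_dw and saved.bo_list ...
 *    si_clear_saved_cs(&saved);
 */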
84
85 void si_clear_saved_cs(struct radeon_saved_cs *saved)
86 {
87 FREE(saved->ib);
88 FREE(saved->bo_list);
89
90 memset(saved, 0, sizeof(*saved));
91 }
92
93 void si_destroy_saved_cs(struct si_saved_cs *scs)
94 {
95 si_clear_saved_cs(&scs->gfx);
96 si_resource_reference(&scs->trace_buf, NULL);
97 free(scs);
98 }
99
100 static void si_dump_shader(struct si_screen *sscreen,
101 enum pipe_shader_type processor,
102 struct si_shader *shader, FILE *f)
103 {
104 if (shader->shader_log)
105 fwrite(shader->shader_log, shader->shader_log_size, 1, f);
106 else
107 si_shader_dump(sscreen, shader, NULL, processor, f, false);
108
109 if (shader->bo && sscreen->options.dump_shader_binary) {
110 unsigned size = shader->bo->b.b.width0;
111 fprintf(f, "BO: VA=%"PRIx64" Size=%u\n", shader->bo->gpu_address, size);
112
113 const char *mapped = sscreen->ws->buffer_map(shader->bo->buf, NULL,
114 PIPE_TRANSFER_UNSYNCHRONIZED |
115 PIPE_TRANSFER_READ |
116 RADEON_TRANSFER_TEMPORARY);
117
118 for (unsigned i = 0; i < size; i += 4) {
119 fprintf(f, " %4x: %08x\n", i, *(uint32_t*)(mapped + i));
120 }
121
122 sscreen->ws->buffer_unmap(shader->bo->buf);
123
124 fprintf(f, "\n");
125 }
126 }
127
128 struct si_log_chunk_shader {
129 /* The shader destroy code assumes a current context for unlinking of
130 * PM4 packets etc.
131 *
132 * While we should be able to destroy shaders without a context, doing
133 	 * so would happen only very rarely and would therefore be likely to fail
134 * just when you're trying to debug something. Let's just remember the
135 * current context in the chunk.
136 */
137 struct si_context *ctx;
138 struct si_shader *shader;
139 enum pipe_shader_type processor;
140
141 /* For keep-alive reference counts */
142 struct si_shader_selector *sel;
143 struct si_compute *program;
144 };
145
146 static void
147 si_log_chunk_shader_destroy(void *data)
148 {
149 struct si_log_chunk_shader *chunk = data;
150 si_shader_selector_reference(chunk->ctx, &chunk->sel, NULL);
151 si_compute_reference(&chunk->program, NULL);
152 FREE(chunk);
153 }
154
155 static void
156 si_log_chunk_shader_print(void *data, FILE *f)
157 {
158 struct si_log_chunk_shader *chunk = data;
159 struct si_screen *sscreen = chunk->ctx->screen;
160 si_dump_shader(sscreen, chunk->processor,
161 chunk->shader, f);
162 }
163
164 static struct u_log_chunk_type si_log_chunk_type_shader = {
165 .destroy = si_log_chunk_shader_destroy,
166 .print = si_log_chunk_shader_print,
167 };
168
169 static void si_dump_gfx_shader(struct si_context *ctx,
170 const struct si_shader_ctx_state *state,
171 struct u_log_context *log)
172 {
173 struct si_shader *current = state->current;
174
175 if (!state->cso || !current)
176 return;
177
178 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
179 chunk->ctx = ctx;
180 chunk->processor = state->cso->info.processor;
181 chunk->shader = current;
182 si_shader_selector_reference(ctx, &chunk->sel, current->selector);
183 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
184 }
185
186 static void si_dump_compute_shader(struct si_context *ctx,
187 struct u_log_context *log)
188 {
189 const struct si_cs_shader_state *state = &ctx->cs_shader_state;
190
191 if (!state->program)
192 return;
193
194 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
195 chunk->ctx = ctx;
196 chunk->processor = PIPE_SHADER_COMPUTE;
197 chunk->shader = &state->program->shader;
198 si_compute_reference(&chunk->program, state->program);
199 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
200 }
201
202 /**
203 * Shader compiles can be overridden with arbitrary ELF objects by setting
204 * the environment variable RADEON_REPLACE_SHADERS=num1:filename1[;num2:filename2]
205 *
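 * For example (paths are illustrative):
 *
 *    RADEON_REPLACE_SHADERS=2:/tmp/foo.elf;5:/tmp/bar.elf
 *
 * replaces shader 2 with the ELF object /tmp/foo.elf and shader 5 with
 * /tmp/bar.elf.
 *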
206 * TODO: key this off some hash
207 */
208 bool si_replace_shader(unsigned num, struct si_shader_binary *binary)
209 {
210 const char *p = debug_get_option_replace_shaders();
211 const char *semicolon;
212 char *copy = NULL;
213 FILE *f;
214 long filesize, nread;
215 bool replaced = false;
216
217 if (!p)
218 return false;
219
220 while (*p) {
221 unsigned long i;
222 char *endp;
223 i = strtoul(p, &endp, 0);
224
225 p = endp;
226 if (*p != ':') {
227 fprintf(stderr, "RADEON_REPLACE_SHADERS formatted badly.\n");
228 exit(1);
229 }
230 ++p;
231
232 if (i == num)
233 break;
234
235 p = strchr(p, ';');
236 if (!p)
237 return false;
238 ++p;
239 }
240 if (!*p)
241 return false;
242
243 semicolon = strchr(p, ';');
244 if (semicolon) {
245 p = copy = strndup(p, semicolon - p);
246 if (!copy) {
247 fprintf(stderr, "out of memory\n");
248 return false;
249 }
250 }
251
252 fprintf(stderr, "radeonsi: replace shader %u by %s\n", num, p);
253
254 f = fopen(p, "r");
255 if (!f) {
256 perror("radeonsi: failed to open file");
257 goto out_free;
258 }
259
260 if (fseek(f, 0, SEEK_END) != 0)
261 goto file_error;
262
263 filesize = ftell(f);
264 if (filesize < 0)
265 goto file_error;
266
267 if (fseek(f, 0, SEEK_SET) != 0)
268 goto file_error;
269
270 binary->elf_buffer = MALLOC(filesize);
271 if (!binary->elf_buffer) {
272 fprintf(stderr, "out of memory\n");
273 goto out_close;
274 }
275
276 nread = fread((void*)binary->elf_buffer, 1, filesize, f);
277 if (nread != filesize) {
278 FREE((void*)binary->elf_buffer);
279 binary->elf_buffer = NULL;
280 goto file_error;
281 }
282
283 binary->elf_size = nread;
284 replaced = true;
285
286 out_close:
287 fclose(f);
288 out_free:
289 free(copy);
290 return replaced;
291
292 file_error:
293 perror("radeonsi: reading shader");
294 goto out_close;
295 }
296
297 /* Parsed IBs are difficult to read without colors. Use "less -R file" to
298  * read them, or use "aha -b -f file" to convert them to HTML.
299 */
300 #define COLOR_RESET "\033[0m"
301 #define COLOR_RED "\033[31m"
302 #define COLOR_GREEN "\033[1;32m"
303 #define COLOR_YELLOW "\033[1;33m"
304 #define COLOR_CYAN "\033[1;36m"
305
306 static void si_dump_mmapped_reg(struct si_context *sctx, FILE *f,
307 unsigned offset)
308 {
309 struct radeon_winsys *ws = sctx->ws;
310 uint32_t value;
311
312 if (ws->read_registers(ws, offset, 1, &value))
313 ac_dump_reg(f, sctx->chip_class, offset, value, ~0);
314 }
315
316 static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
317 {
318 if (!sctx->screen->info.has_read_registers_query)
319 return;
320
321 fprintf(f, "Memory-mapped registers:\n");
322 si_dump_mmapped_reg(sctx, f, R_008010_GRBM_STATUS);
323
324 /* No other registers can be read on DRM < 3.1.0. */
325 if (!sctx->screen->info.is_amdgpu ||
326 sctx->screen->info.drm_minor < 1) {
327 fprintf(f, "\n");
328 return;
329 }
330
331 si_dump_mmapped_reg(sctx, f, R_008008_GRBM_STATUS2);
332 si_dump_mmapped_reg(sctx, f, R_008014_GRBM_STATUS_SE0);
333 si_dump_mmapped_reg(sctx, f, R_008018_GRBM_STATUS_SE1);
334 si_dump_mmapped_reg(sctx, f, R_008038_GRBM_STATUS_SE2);
335 si_dump_mmapped_reg(sctx, f, R_00803C_GRBM_STATUS_SE3);
336 si_dump_mmapped_reg(sctx, f, R_00D034_SDMA0_STATUS_REG);
337 si_dump_mmapped_reg(sctx, f, R_00D834_SDMA1_STATUS_REG);
338 if (sctx->chip_class <= GFX8) {
339 si_dump_mmapped_reg(sctx, f, R_000E50_SRBM_STATUS);
340 si_dump_mmapped_reg(sctx, f, R_000E4C_SRBM_STATUS2);
341 si_dump_mmapped_reg(sctx, f, R_000E54_SRBM_STATUS3);
342 }
343 si_dump_mmapped_reg(sctx, f, R_008680_CP_STAT);
344 si_dump_mmapped_reg(sctx, f, R_008674_CP_STALLED_STAT1);
345 si_dump_mmapped_reg(sctx, f, R_008678_CP_STALLED_STAT2);
346 si_dump_mmapped_reg(sctx, f, R_008670_CP_STALLED_STAT3);
347 si_dump_mmapped_reg(sctx, f, R_008210_CP_CPC_STATUS);
348 si_dump_mmapped_reg(sctx, f, R_008214_CP_CPC_BUSY_STAT);
349 si_dump_mmapped_reg(sctx, f, R_008218_CP_CPC_STALLED_STAT1);
350 si_dump_mmapped_reg(sctx, f, R_00821C_CP_CPF_STATUS);
351 si_dump_mmapped_reg(sctx, f, R_008220_CP_CPF_BUSY_STAT);
352 si_dump_mmapped_reg(sctx, f, R_008224_CP_CPF_STALLED_STAT1);
353 fprintf(f, "\n");
354 }
355
356 struct si_log_chunk_cs {
357 struct si_context *ctx;
358 struct si_saved_cs *cs;
359 bool dump_bo_list;
360 unsigned gfx_begin, gfx_end;
361 unsigned compute_begin, compute_end;
362 };
363
364 static void si_log_chunk_type_cs_destroy(void *data)
365 {
366 struct si_log_chunk_cs *chunk = data;
367 si_saved_cs_reference(&chunk->cs, NULL);
368 free(chunk);
369 }
370
371 static void si_parse_current_ib(FILE *f, struct radeon_cmdbuf *cs,
372 unsigned begin, unsigned end,
373 int *last_trace_id, unsigned trace_id_count,
374 const char *name, enum chip_class chip_class)
375 {
376 unsigned orig_end = end;
377
378 assert(begin <= end);
379
380 fprintf(f, "------------------ %s begin (dw = %u) ------------------\n",
381 name, begin);
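	/* "begin" and "end" are dword offsets into the logical concatenation of
	 * all chunks (cs->prev[] followed by cs->current). Walk the finished
	 * chunks first, clamping the range to each one, then fall through to
	 * the current chunk below.
	 */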
382
383 for (unsigned prev_idx = 0; prev_idx < cs->num_prev; ++prev_idx) {
384 struct radeon_cmdbuf_chunk *chunk = &cs->prev[prev_idx];
385
386 if (begin < chunk->cdw) {
387 ac_parse_ib_chunk(f, chunk->buf + begin,
388 MIN2(end, chunk->cdw) - begin,
389 last_trace_id, trace_id_count,
390 chip_class, NULL, NULL);
391 }
392
393 if (end <= chunk->cdw)
394 return;
395
396 if (begin < chunk->cdw)
397 fprintf(f, "\n---------- Next %s Chunk ----------\n\n",
398 name);
399
400 begin -= MIN2(begin, chunk->cdw);
401 end -= chunk->cdw;
402 }
403
404 assert(end <= cs->current.cdw);
405
406 ac_parse_ib_chunk(f, cs->current.buf + begin, end - begin, last_trace_id,
407 trace_id_count, chip_class, NULL, NULL);
408
409 fprintf(f, "------------------- %s end (dw = %u) -------------------\n\n",
410 name, orig_end);
411 }
412
413 static void si_log_chunk_type_cs_print(void *data, FILE *f)
414 {
415 struct si_log_chunk_cs *chunk = data;
416 struct si_context *ctx = chunk->ctx;
417 struct si_saved_cs *scs = chunk->cs;
418 int last_trace_id = -1;
419 int last_compute_trace_id = -1;
420
421 /* We are expecting that the ddebug pipe has already
422 * waited for the context, so this buffer should be idle.
423 * If the GPU is hung, there is no point in waiting for it.
424 */
425 uint32_t *map = ctx->ws->buffer_map(scs->trace_buf->buf,
426 NULL,
427 PIPE_TRANSFER_UNSYNCHRONIZED |
428 PIPE_TRANSFER_READ);
429 if (map) {
430 last_trace_id = map[0];
431 last_compute_trace_id = map[1];
432 }
433
434 if (chunk->gfx_end != chunk->gfx_begin) {
435 if (chunk->gfx_begin == 0) {
436 if (ctx->init_config)
437 ac_parse_ib(f, ctx->init_config->pm4, ctx->init_config->ndw,
438 NULL, 0, "IB2: Init config", ctx->chip_class,
439 NULL, NULL);
440
441 if (ctx->init_config_gs_rings)
442 ac_parse_ib(f, ctx->init_config_gs_rings->pm4,
443 ctx->init_config_gs_rings->ndw,
444 NULL, 0, "IB2: Init GS rings", ctx->chip_class,
445 NULL, NULL);
446 }
447
448 if (scs->flushed) {
449 ac_parse_ib(f, scs->gfx.ib + chunk->gfx_begin,
450 chunk->gfx_end - chunk->gfx_begin,
451 &last_trace_id, map ? 1 : 0, "IB", ctx->chip_class,
452 NULL, NULL);
453 } else {
454 si_parse_current_ib(f, ctx->gfx_cs, chunk->gfx_begin,
455 chunk->gfx_end, &last_trace_id, map ? 1 : 0,
456 "IB", ctx->chip_class);
457 }
458 }
459
460 if (chunk->compute_end != chunk->compute_begin) {
461 assert(ctx->prim_discard_compute_cs);
462
463 if (scs->flushed) {
464 ac_parse_ib(f, scs->compute.ib + chunk->compute_begin,
465 chunk->compute_end - chunk->compute_begin,
466 &last_compute_trace_id, map ? 1 : 0, "Compute IB", ctx->chip_class,
467 NULL, NULL);
468 } else {
469 si_parse_current_ib(f, ctx->prim_discard_compute_cs, chunk->compute_begin,
470 chunk->compute_end, &last_compute_trace_id,
471 map ? 1 : 0, "Compute IB", ctx->chip_class);
472 }
473 }
474
475 if (chunk->dump_bo_list) {
476 fprintf(f, "Flushing. Time: ");
477 util_dump_ns(f, scs->time_flush);
478 fprintf(f, "\n\n");
479 si_dump_bo_list(ctx, &scs->gfx, f);
480 }
481 }
482
483 static const struct u_log_chunk_type si_log_chunk_type_cs = {
484 .destroy = si_log_chunk_type_cs_destroy,
485 .print = si_log_chunk_type_cs_print,
486 };
487
488 static void si_log_cs(struct si_context *ctx, struct u_log_context *log,
489 bool dump_bo_list)
490 {
491 assert(ctx->current_saved_cs);
492
493 struct si_saved_cs *scs = ctx->current_saved_cs;
494 unsigned gfx_cur = ctx->gfx_cs->prev_dw + ctx->gfx_cs->current.cdw;
495 unsigned compute_cur = 0;
496
497 if (ctx->prim_discard_compute_cs)
498 compute_cur = ctx->prim_discard_compute_cs->prev_dw + ctx->prim_discard_compute_cs->current.cdw;
499
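	/* gfx_last_dw/compute_last_dw remember how much of each IB has already
	 * been logged, so each chunk only records the delta since the previous
	 * one. If nothing new was emitted and no buffer list is requested,
	 * skip the chunk entirely.
	 */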
500 if (!dump_bo_list &&
501 gfx_cur == scs->gfx_last_dw &&
502 compute_cur == scs->compute_last_dw)
503 return;
504
505 struct si_log_chunk_cs *chunk = calloc(1, sizeof(*chunk));
506
507 chunk->ctx = ctx;
508 si_saved_cs_reference(&chunk->cs, scs);
509 chunk->dump_bo_list = dump_bo_list;
510
511 chunk->gfx_begin = scs->gfx_last_dw;
512 chunk->gfx_end = gfx_cur;
513 scs->gfx_last_dw = gfx_cur;
514
515 chunk->compute_begin = scs->compute_last_dw;
516 chunk->compute_end = compute_cur;
517 scs->compute_last_dw = compute_cur;
518
519 u_log_chunk(log, &si_log_chunk_type_cs, chunk);
520 }
521
522 void si_auto_log_cs(void *data, struct u_log_context *log)
523 {
524 struct si_context *ctx = (struct si_context *)data;
525 si_log_cs(ctx, log, false);
526 }
527
528 void si_log_hw_flush(struct si_context *sctx)
529 {
530 if (!sctx->log)
531 return;
532
533 si_log_cs(sctx, sctx->log, true);
534
535 if (&sctx->b == sctx->screen->aux_context) {
536 /* The aux context isn't captured by the ddebug wrapper,
537 * so we dump it on a flush-by-flush basis here.
538 */
539 FILE *f = dd_get_debug_file(false);
540 if (!f) {
541 fprintf(stderr, "radeonsi: error opening aux context dump file.\n");
542 } else {
543 dd_write_header(f, &sctx->screen->b, 0);
544
545 fprintf(f, "Aux context dump:\n\n");
546 u_log_new_page_print(sctx->log, f);
547
548 fclose(f);
549 }
550 }
551 }
552
553 static const char *priority_to_string(enum radeon_bo_priority priority)
554 {
555 #define ITEM(x) [RADEON_PRIO_##x] = #x
556 static const char *table[64] = {
557 ITEM(FENCE),
558 ITEM(TRACE),
559 ITEM(SO_FILLED_SIZE),
560 ITEM(QUERY),
561 ITEM(IB1),
562 ITEM(IB2),
563 ITEM(DRAW_INDIRECT),
564 ITEM(INDEX_BUFFER),
565 ITEM(CP_DMA),
566 ITEM(CONST_BUFFER),
567 ITEM(DESCRIPTORS),
568 ITEM(BORDER_COLORS),
569 ITEM(SAMPLER_BUFFER),
570 ITEM(VERTEX_BUFFER),
571 ITEM(SHADER_RW_BUFFER),
572 ITEM(COMPUTE_GLOBAL),
573 ITEM(SAMPLER_TEXTURE),
574 ITEM(SHADER_RW_IMAGE),
575 ITEM(SAMPLER_TEXTURE_MSAA),
576 ITEM(COLOR_BUFFER),
577 ITEM(DEPTH_BUFFER),
578 ITEM(COLOR_BUFFER_MSAA),
579 ITEM(DEPTH_BUFFER_MSAA),
580 ITEM(SEPARATE_META),
581 ITEM(SHADER_BINARY),
582 ITEM(SHADER_RINGS),
583 ITEM(SCRATCH_BUFFER),
584 };
585 #undef ITEM
586
587 assert(priority < ARRAY_SIZE(table));
588 return table[priority];
589 }
590
591 static int bo_list_compare_va(const struct radeon_bo_list_item *a,
592 const struct radeon_bo_list_item *b)
593 {
594 return a->vm_address < b->vm_address ? -1 :
595 a->vm_address > b->vm_address ? 1 : 0;
596 }
597
598 static void si_dump_bo_list(struct si_context *sctx,
599 const struct radeon_saved_cs *saved, FILE *f)
600 {
601 unsigned i,j;
602
603 if (!saved->bo_list)
604 return;
605
606 	/* Sort the list according to VM addresses first. */
607 qsort(saved->bo_list, saved->bo_count,
608 sizeof(saved->bo_list[0]), (void*)bo_list_compare_va);
609
610 fprintf(f, "Buffer list (in units of pages = 4kB):\n"
611 COLOR_YELLOW " Size VM start page "
612 "VM end page Usage" COLOR_RESET "\n");
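	/* The resulting lines look roughly like this (values made up):
	 *
	 *          16 0x0000000400000 0x0000000400010 VERTEX_BUFFER, DESCRIPTORS
	 *           4 -- hole --
	 *           1 0x0000000400014 0x0000000400015 TRACE
	 */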
613
614 for (i = 0; i < saved->bo_count; i++) {
615 /* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
616 const unsigned page_size = sctx->screen->info.gart_page_size;
617 uint64_t va = saved->bo_list[i].vm_address;
618 uint64_t size = saved->bo_list[i].bo_size;
619 bool hit = false;
620
621 /* If there's unused virtual memory between 2 buffers, print it. */
622 if (i) {
623 uint64_t previous_va_end = saved->bo_list[i-1].vm_address +
624 saved->bo_list[i-1].bo_size;
625
626 if (va > previous_va_end) {
627 fprintf(f, " %10"PRIu64" -- hole --\n",
628 (va - previous_va_end) / page_size);
629 }
630 }
631
632 /* Print the buffer. */
633 fprintf(f, " %10"PRIu64" 0x%013"PRIX64" 0x%013"PRIX64" ",
634 size / page_size, va / page_size, (va + size) / page_size);
635
636 /* Print the usage. */
637 for (j = 0; j < 32; j++) {
638 if (!(saved->bo_list[i].priority_usage & (1u << j)))
639 continue;
640
641 fprintf(f, "%s%s", !hit ? "" : ", ", priority_to_string(j));
642 hit = true;
643 }
644 fprintf(f, "\n");
645 }
646 fprintf(f, "\nNote: The holes represent memory not used by the IB.\n"
647 " Other buffers can still be allocated there.\n\n");
648 }
649
650 static void si_dump_framebuffer(struct si_context *sctx, struct u_log_context *log)
651 {
652 struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
653 struct si_texture *tex;
654 int i;
655
656 for (i = 0; i < state->nr_cbufs; i++) {
657 if (!state->cbufs[i])
658 continue;
659
660 tex = (struct si_texture*)state->cbufs[i]->texture;
661 u_log_printf(log, COLOR_YELLOW "Color buffer %i:" COLOR_RESET "\n", i);
662 si_print_texture_info(sctx->screen, tex, log);
663 u_log_printf(log, "\n");
664 }
665
666 if (state->zsbuf) {
667 tex = (struct si_texture*)state->zsbuf->texture;
668 u_log_printf(log, COLOR_YELLOW "Depth-stencil buffer:" COLOR_RESET "\n");
669 si_print_texture_info(sctx->screen, tex, log);
670 u_log_printf(log, "\n");
671 }
672 }
673
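/* Maps a "logical" slot index (as seen by the dump code) to the physical
 * slot in the descriptor array. si_identity() is used for 1:1 lists such
 * as RW buffers and vertex buffers; si_get_constbuf_slot(),
 * si_get_shaderbuf_slot(), si_get_sampler_slot() and si_get_image_slot()
 * handle the combined descriptor arrays.
 */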
674 typedef unsigned (*slot_remap_func)(unsigned);
675
676 struct si_log_chunk_desc_list {
677 	/** Pointer to the memory map of the buffer where the list is uploaded. */
678 uint32_t *gpu_list;
679 	/** Reference to the buffer where the list is uploaded, so that gpu_list
680 	 * stays valid. */
681 struct si_resource *buf;
682
683 const char *shader_name;
684 const char *elem_name;
685 slot_remap_func slot_remap;
686 enum chip_class chip_class;
687 unsigned element_dw_size;
688 unsigned num_elements;
689
690 uint32_t list[0];
691 };
692
693 static void
694 si_log_chunk_desc_list_destroy(void *data)
695 {
696 struct si_log_chunk_desc_list *chunk = data;
697 si_resource_reference(&chunk->buf, NULL);
698 FREE(chunk);
699 }
700
701 static void
702 si_log_chunk_desc_list_print(void *data, FILE *f)
703 {
704 struct si_log_chunk_desc_list *chunk = data;
705
706 for (unsigned i = 0; i < chunk->num_elements; i++) {
707 unsigned cpu_dw_offset = i * chunk->element_dw_size;
708 unsigned gpu_dw_offset = chunk->slot_remap(i) * chunk->element_dw_size;
709 const char *list_note = chunk->gpu_list ? "GPU list" : "CPU list";
710 uint32_t *cpu_list = chunk->list + cpu_dw_offset;
711 uint32_t *gpu_list = chunk->gpu_list ? chunk->gpu_list + gpu_dw_offset : cpu_list;
712
713 fprintf(f, COLOR_GREEN "%s%s slot %u (%s):" COLOR_RESET "\n",
714 chunk->shader_name, chunk->elem_name, i, list_note);
715
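		/* The dump interprets the slot according to its dword size:
		 *   4 dwords  - a buffer resource descriptor,
		 *   8 dwords  - an image resource descriptor (dwords 4-7 are also
		 *               shown as a buffer view),
		 *  16 dwords  - a combined image + FMASK + sampler state slot.
		 */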
716 switch (chunk->element_dw_size) {
717 case 4:
718 for (unsigned j = 0; j < 4; j++)
719 ac_dump_reg(f, chunk->chip_class,
720 R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
721 gpu_list[j], 0xffffffff);
722 break;
723 case 8:
724 for (unsigned j = 0; j < 8; j++)
725 ac_dump_reg(f, chunk->chip_class,
726 R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
727 gpu_list[j], 0xffffffff);
728
729 fprintf(f, COLOR_CYAN " Buffer:" COLOR_RESET "\n");
730 for (unsigned j = 0; j < 4; j++)
731 ac_dump_reg(f, chunk->chip_class,
732 R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
733 gpu_list[4+j], 0xffffffff);
734 break;
735 case 16:
736 for (unsigned j = 0; j < 8; j++)
737 ac_dump_reg(f, chunk->chip_class,
738 R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
739 gpu_list[j], 0xffffffff);
740
741 fprintf(f, COLOR_CYAN " Buffer:" COLOR_RESET "\n");
742 for (unsigned j = 0; j < 4; j++)
743 ac_dump_reg(f, chunk->chip_class,
744 R_008F00_SQ_BUF_RSRC_WORD0 + j*4,
745 gpu_list[4+j], 0xffffffff);
746
747 fprintf(f, COLOR_CYAN " FMASK:" COLOR_RESET "\n");
748 for (unsigned j = 0; j < 8; j++)
749 ac_dump_reg(f, chunk->chip_class,
750 R_008F10_SQ_IMG_RSRC_WORD0 + j*4,
751 gpu_list[8+j], 0xffffffff);
752
753 fprintf(f, COLOR_CYAN " Sampler state:" COLOR_RESET "\n");
754 for (unsigned j = 0; j < 4; j++)
755 ac_dump_reg(f, chunk->chip_class,
756 R_008F30_SQ_IMG_SAMP_WORD0 + j*4,
757 gpu_list[12+j], 0xffffffff);
758 break;
759 }
760
761 if (memcmp(gpu_list, cpu_list, chunk->element_dw_size * 4) != 0) {
762 fprintf(f, COLOR_RED "!!!!! This slot was corrupted in GPU memory !!!!!"
763 COLOR_RESET "\n");
764 }
765
766 fprintf(f, "\n");
767 }
768
769 }
770
771 static const struct u_log_chunk_type si_log_chunk_type_descriptor_list = {
772 .destroy = si_log_chunk_desc_list_destroy,
773 .print = si_log_chunk_desc_list_print,
774 };
775
776 static void si_dump_descriptor_list(struct si_screen *screen,
777 struct si_descriptors *desc,
778 const char *shader_name,
779 const char *elem_name,
780 unsigned element_dw_size,
781 unsigned num_elements,
782 slot_remap_func slot_remap,
783 struct u_log_context *log)
784 {
785 if (!desc->list)
786 return;
787
788 /* In some cases, the caller doesn't know how many elements are really
789 * uploaded. Reduce num_elements to fit in the range of active slots. */
790 unsigned active_range_dw_begin =
791 desc->first_active_slot * desc->element_dw_size;
792 unsigned active_range_dw_end =
793 active_range_dw_begin + desc->num_active_slots * desc->element_dw_size;
794
795 while (num_elements > 0) {
796 int i = slot_remap(num_elements - 1);
797 unsigned dw_begin = i * element_dw_size;
798 unsigned dw_end = dw_begin + element_dw_size;
799
800 if (dw_begin >= active_range_dw_begin && dw_end <= active_range_dw_end)
801 break;
802
803 num_elements--;
804 }
805
806 struct si_log_chunk_desc_list *chunk =
807 CALLOC_VARIANT_LENGTH_STRUCT(si_log_chunk_desc_list,
808 4 * element_dw_size * num_elements);
809 chunk->shader_name = shader_name;
810 chunk->elem_name = elem_name;
811 chunk->element_dw_size = element_dw_size;
812 chunk->num_elements = num_elements;
813 chunk->slot_remap = slot_remap;
814 chunk->chip_class = screen->info.chip_class;
815
816 si_resource_reference(&chunk->buf, desc->buffer);
817 chunk->gpu_list = desc->gpu_list;
818
819 for (unsigned i = 0; i < num_elements; ++i) {
820 memcpy(&chunk->list[i * element_dw_size],
821 &desc->list[slot_remap(i) * element_dw_size],
822 4 * element_dw_size);
823 }
824
825 u_log_chunk(log, &si_log_chunk_type_descriptor_list, chunk);
826 }
827
828 static unsigned si_identity(unsigned slot)
829 {
830 return slot;
831 }
832
833 static void si_dump_descriptors(struct si_context *sctx,
834 enum pipe_shader_type processor,
835 const struct tgsi_shader_info *info,
836 struct u_log_context *log)
837 {
838 struct si_descriptors *descs =
839 &sctx->descriptors[SI_DESCS_FIRST_SHADER +
840 processor * SI_NUM_SHADER_DESCS];
841 static const char *shader_name[] = {"VS", "PS", "GS", "TCS", "TES", "CS"};
842 const char *name = shader_name[processor];
843 unsigned enabled_constbuf, enabled_shaderbuf, enabled_samplers;
844 unsigned enabled_images;
845
846 if (info) {
847 enabled_constbuf = info->const_buffers_declared;
848 enabled_shaderbuf = info->shader_buffers_declared;
849 enabled_samplers = info->samplers_declared;
850 enabled_images = info->images_declared;
851 } else {
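		/* Without shader info, derive the enabled slots from the context.
		 * Constant and shader buffers share one enabled_mask: the high bits
		 * are constant buffers and the low SI_NUM_SHADER_BUFFERS bits are
		 * shader buffers stored in reverse bit order, hence the bitreverse
		 * below.
		 */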
852 enabled_constbuf = sctx->const_and_shader_buffers[processor].enabled_mask >>
853 SI_NUM_SHADER_BUFFERS;
854 enabled_shaderbuf = sctx->const_and_shader_buffers[processor].enabled_mask &
855 u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);
856 enabled_shaderbuf = util_bitreverse(enabled_shaderbuf) >>
857 (32 - SI_NUM_SHADER_BUFFERS);
858 enabled_samplers = sctx->samplers[processor].enabled_mask;
859 enabled_images = sctx->images[processor].enabled_mask;
860 }
861
862 if (processor == PIPE_SHADER_VERTEX &&
863 sctx->vb_descriptors_buffer &&
864 sctx->vb_descriptors_gpu_list &&
865 sctx->vertex_elements) {
866 assert(info); /* only CS may not have an info struct */
867 struct si_descriptors desc = {};
868
869 desc.buffer = sctx->vb_descriptors_buffer;
870 desc.list = sctx->vb_descriptors_gpu_list;
871 desc.gpu_list = sctx->vb_descriptors_gpu_list;
872 desc.element_dw_size = 4;
873 desc.num_active_slots = sctx->vertex_elements->desc_list_byte_size / 16;
874
875 si_dump_descriptor_list(sctx->screen, &desc, name,
876 " - Vertex buffer", 4, info->num_inputs,
877 si_identity, log);
878 }
879
880 si_dump_descriptor_list(sctx->screen,
881 &descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS],
882 name, " - Constant buffer", 4,
883 util_last_bit(enabled_constbuf),
884 si_get_constbuf_slot, log);
885 si_dump_descriptor_list(sctx->screen,
886 &descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS],
887 name, " - Shader buffer", 4,
888 util_last_bit(enabled_shaderbuf),
889 si_get_shaderbuf_slot, log);
890 si_dump_descriptor_list(sctx->screen,
891 &descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES],
892 name, " - Sampler", 16,
893 util_last_bit(enabled_samplers),
894 si_get_sampler_slot, log);
895 si_dump_descriptor_list(sctx->screen,
896 &descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES],
897 name, " - Image", 8,
898 util_last_bit(enabled_images),
899 si_get_image_slot, log);
900 }
901
902 static void si_dump_gfx_descriptors(struct si_context *sctx,
903 const struct si_shader_ctx_state *state,
904 struct u_log_context *log)
905 {
906 if (!state->cso || !state->current)
907 return;
908
909 si_dump_descriptors(sctx, state->cso->type, &state->cso->info, log);
910 }
911
912 static void si_dump_compute_descriptors(struct si_context *sctx,
913 struct u_log_context *log)
914 {
915 if (!sctx->cs_shader_state.program)
916 return;
917
918 si_dump_descriptors(sctx, PIPE_SHADER_COMPUTE, NULL, log);
919 }
920
921 struct si_shader_inst {
922 const char *text; /* start of disassembly for this instruction */
923 unsigned textlen;
924 unsigned size; /* instruction size = 4 or 8 */
925 uint64_t addr; /* instruction address */
926 };
927
928 /**
929  * Open the given \p binary as \p rtld_binary, split the contained
930  * disassembly string into instructions, and add them to the array
931  * pointed to by \p instructions, which must be sufficiently large.
932 *
933 * Labels are considered to be part of the following instruction.
934 *
935 * The caller must keep \p rtld_binary alive as long as \p instructions are
936 * used and then close it afterwards.
937 */
938 static void si_add_split_disasm(struct si_screen *screen,
939 struct ac_rtld_binary *rtld_binary,
940 struct si_shader_binary *binary,
941 uint64_t *addr,
942 unsigned *num,
943 struct si_shader_inst *instructions)
944 {
945 if (!ac_rtld_open(rtld_binary, (struct ac_rtld_open_info){
946 .info = &screen->info,
947 .num_parts = 1,
948 .elf_ptrs = &binary->elf_buffer,
949 .elf_sizes = &binary->elf_size }))
950 return;
951
952 const char *disasm;
953 size_t nbytes;
954 if (!ac_rtld_get_section_by_name(rtld_binary, ".AMDGPU.disasm",
955 &disasm, &nbytes))
956 return;
957
958 const char *end = disasm + nbytes;
959 while (disasm < end) {
960 const char *semicolon = memchr(disasm, ';', end - disasm);
961 if (!semicolon)
962 break;
963
964 struct si_shader_inst *inst = &instructions[(*num)++];
965 const char *inst_end = memchr(semicolon + 1, '\n', end - semicolon - 1);
966 if (!inst_end)
967 inst_end = end;
968
969 inst->text = disasm;
970 inst->textlen = inst_end - disasm;
971
972 inst->addr = *addr;
973 /* More than 16 chars after ";" means the instruction is 8 bytes long. */
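		/* Each line of the disassembly ends in "; <hex encoding>". A 4-byte
		 * opcode leaves a short encoding after the semicolon; an 8-byte
		 * opcode leaves roughly twice as much text, which is what the
		 * length check below distinguishes.
		 */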
974 inst->size = inst_end - semicolon > 16 ? 8 : 4;
975 *addr += inst->size;
976
977 if (inst_end == end)
978 break;
979 disasm = inst_end + 1;
980 }
981 }
982
983 /* If the shader is being executed, print its asm instructions, and annotate
984 * those that are being executed right now with information about waves that
985 * execute them. This is most useful during a GPU hang.
986 */
987 static void si_print_annotated_shader(struct si_shader *shader,
988 struct ac_wave_info *waves,
989 unsigned num_waves,
990 FILE *f)
991 {
992 if (!shader)
993 return;
994
995 struct si_screen *screen = shader->selector->screen;
996 uint64_t start_addr = shader->bo->gpu_address;
997 uint64_t end_addr = start_addr + shader->bo->b.b.width0;
998 unsigned i;
999
1000 /* See if any wave executes the shader. */
1001 for (i = 0; i < num_waves; i++) {
1002 if (start_addr <= waves[i].pc && waves[i].pc <= end_addr)
1003 break;
1004 }
1005 if (i == num_waves)
1006 return; /* the shader is not being executed */
1007
1008 /* Remember the first found wave. The waves are sorted according to PC. */
1009 waves = &waves[i];
1010 num_waves -= i;
1011
1012 /* Get the list of instructions.
1013 * Buffer size / 4 is the upper bound of the instruction count.
1014 */
1015 unsigned num_inst = 0;
1016 uint64_t inst_addr = start_addr;
1017 struct ac_rtld_binary rtld_binaries[5] = {};
1018 struct si_shader_inst *instructions =
1019 calloc(shader->bo->b.b.width0 / 4, sizeof(struct si_shader_inst));
1020
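	/* A monolithic shader variant can consist of up to five parts (prolog,
	 * previous stage, prolog2, main part, epilog) laid out back to back in
	 * the shader BO, so disassemble them in that order while accumulating
	 * inst_addr.
	 */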
1021 if (shader->prolog) {
1022 si_add_split_disasm(screen, &rtld_binaries[0], &shader->prolog->binary,
1023 &inst_addr, &num_inst, instructions);
1024 }
1025 if (shader->previous_stage) {
1026 si_add_split_disasm(screen, &rtld_binaries[1], &shader->previous_stage->binary,
1027 &inst_addr, &num_inst, instructions);
1028 }
1029 if (shader->prolog2) {
1030 si_add_split_disasm(screen, &rtld_binaries[2], &shader->prolog2->binary,
1031 &inst_addr, &num_inst, instructions);
1032 }
1033 si_add_split_disasm(screen, &rtld_binaries[3], &shader->binary,
1034 &inst_addr, &num_inst, instructions);
1035 if (shader->epilog) {
1036 si_add_split_disasm(screen, &rtld_binaries[4], &shader->epilog->binary,
1037 &inst_addr, &num_inst, instructions);
1038 }
1039
1040 fprintf(f, COLOR_YELLOW "%s - annotated disassembly:" COLOR_RESET "\n",
1041 si_get_shader_name(shader, shader->selector->type));
1042
1043 /* Print instructions with annotations. */
1044 for (i = 0; i < num_inst; i++) {
1045 struct si_shader_inst *inst = &instructions[i];
1046
1047 fprintf(f, "%.*s [PC=0x%"PRIx64", size=%u]\n",
1048 inst->textlen, inst->text, inst->addr, inst->size);
1049
1050 /* Print which waves execute the instruction right now. */
1051 while (num_waves && inst->addr == waves->pc) {
1052 fprintf(f,
1053 " " COLOR_GREEN "^ SE%u SH%u CU%u "
1054 "SIMD%u WAVE%u EXEC=%016"PRIx64 " ",
1055 waves->se, waves->sh, waves->cu, waves->simd,
1056 waves->wave, waves->exec);
1057
1058 if (inst->size == 4) {
1059 fprintf(f, "INST32=%08X" COLOR_RESET "\n",
1060 waves->inst_dw0);
1061 } else {
1062 fprintf(f, "INST64=%08X %08X" COLOR_RESET "\n",
1063 waves->inst_dw0, waves->inst_dw1);
1064 }
1065
1066 waves->matched = true;
1067 waves = &waves[1];
1068 num_waves--;
1069 }
1070 }
1071
1072 fprintf(f, "\n\n");
1073 free(instructions);
1074 for (unsigned i = 0; i < ARRAY_SIZE(rtld_binaries); ++i)
1075 ac_rtld_close(&rtld_binaries[i]);
1076 }
1077
1078 static void si_dump_annotated_shaders(struct si_context *sctx, FILE *f)
1079 {
1080 struct ac_wave_info waves[AC_MAX_WAVES_PER_CHIP];
1081 unsigned num_waves = ac_get_wave_info(waves);
1082
1083 fprintf(f, COLOR_CYAN "The number of active waves = %u" COLOR_RESET
1084 "\n\n", num_waves);
1085
1086 si_print_annotated_shader(sctx->vs_shader.current, waves, num_waves, f);
1087 si_print_annotated_shader(sctx->tcs_shader.current, waves, num_waves, f);
1088 si_print_annotated_shader(sctx->tes_shader.current, waves, num_waves, f);
1089 si_print_annotated_shader(sctx->gs_shader.current, waves, num_waves, f);
1090 si_print_annotated_shader(sctx->ps_shader.current, waves, num_waves, f);
1091
1092 /* Print waves executing shaders that are not currently bound. */
1093 unsigned i;
1094 bool found = false;
1095 for (i = 0; i < num_waves; i++) {
1096 if (waves[i].matched)
1097 continue;
1098
1099 if (!found) {
1100 fprintf(f, COLOR_CYAN
1101 "Waves not executing currently-bound shaders:"
1102 COLOR_RESET "\n");
1103 found = true;
1104 }
1105 fprintf(f, " SE%u SH%u CU%u SIMD%u WAVE%u EXEC=%016"PRIx64
1106 " INST=%08X %08X PC=%"PRIx64"\n",
1107 waves[i].se, waves[i].sh, waves[i].cu, waves[i].simd,
1108 waves[i].wave, waves[i].exec, waves[i].inst_dw0,
1109 waves[i].inst_dw1, waves[i].pc);
1110 }
1111 if (found)
1112 fprintf(f, "\n\n");
1113 }
1114
1115 static void si_dump_command(const char *title, const char *command, FILE *f)
1116 {
1117 char line[2000];
1118
1119 FILE *p = popen(command, "r");
1120 if (!p)
1121 return;
1122
1123 fprintf(f, COLOR_YELLOW "%s: " COLOR_RESET "\n", title);
1124 while (fgets(line, sizeof(line), p))
1125 fputs(line, f);
1126 fprintf(f, "\n\n");
1127 pclose(p);
1128 }
1129
1130 static void si_dump_debug_state(struct pipe_context *ctx, FILE *f,
1131 unsigned flags)
1132 {
1133 struct si_context *sctx = (struct si_context*)ctx;
1134
1135 if (sctx->log)
1136 u_log_flush(sctx->log);
1137
1138 if (flags & PIPE_DUMP_DEVICE_STATUS_REGISTERS) {
1139 si_dump_debug_registers(sctx, f);
1140
1141 si_dump_annotated_shaders(sctx, f);
1142 si_dump_command("Active waves (raw data)", "umr -O halt_waves -wa | column -t", f);
1143 si_dump_command("Wave information", "umr -O halt_waves,bits -wa", f);
1144 }
1145 }
1146
1147 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log)
1148 {
1149 struct si_shader_ctx_state *tcs_shader;
1150
1151 if (!log)
1152 return;
1153
1154 tcs_shader = &sctx->tcs_shader;
1155 if (sctx->tes_shader.cso && !sctx->tcs_shader.cso)
1156 tcs_shader = &sctx->fixed_func_tcs_shader;
1157
1158 si_dump_framebuffer(sctx, log);
1159
1160 si_dump_gfx_shader(sctx, &sctx->vs_shader, log);
1161 si_dump_gfx_shader(sctx, tcs_shader, log);
1162 si_dump_gfx_shader(sctx, &sctx->tes_shader, log);
1163 si_dump_gfx_shader(sctx, &sctx->gs_shader, log);
1164 si_dump_gfx_shader(sctx, &sctx->ps_shader, log);
1165
1166 si_dump_descriptor_list(sctx->screen,
1167 &sctx->descriptors[SI_DESCS_RW_BUFFERS],
1168 "", "RW buffers", 4,
1169 sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots,
1170 si_identity, log);
1171 si_dump_gfx_descriptors(sctx, &sctx->vs_shader, log);
1172 si_dump_gfx_descriptors(sctx, tcs_shader, log);
1173 si_dump_gfx_descriptors(sctx, &sctx->tes_shader, log);
1174 si_dump_gfx_descriptors(sctx, &sctx->gs_shader, log);
1175 si_dump_gfx_descriptors(sctx, &sctx->ps_shader, log);
1176 }
1177
1178 void si_log_compute_state(struct si_context *sctx, struct u_log_context *log)
1179 {
1180 if (!log)
1181 return;
1182
1183 si_dump_compute_shader(sctx, log);
1184 si_dump_compute_descriptors(sctx, log);
1185 }
1186
1187 static void si_dump_dma(struct si_context *sctx,
1188 struct radeon_saved_cs *saved, FILE *f)
1189 {
1190 static const char ib_name[] = "sDMA IB";
1191 unsigned i;
1192
1193 si_dump_bo_list(sctx, saved, f);
1194
1195 fprintf(f, "------------------ %s begin ------------------\n", ib_name);
1196
1197 for (i = 0; i < saved->num_dw; ++i) {
1198 fprintf(f, " %08x\n", saved->ib[i]);
1199 }
1200
1201 fprintf(f, "------------------- %s end -------------------\n", ib_name);
1202 fprintf(f, "\n");
1203
1204 fprintf(f, "SDMA Dump Done.\n");
1205 }
1206
1207 void si_check_vm_faults(struct si_context *sctx,
1208 struct radeon_saved_cs *saved, enum ring_type ring)
1209 {
1210 struct pipe_screen *screen = sctx->b.screen;
1211 FILE *f;
1212 uint64_t addr;
1213 char cmd_line[4096];
1214
1215 if (!ac_vm_fault_occured(sctx->chip_class,
1216 &sctx->dmesg_timestamp, &addr))
1217 return;
1218
1219 f = dd_get_debug_file(false);
1220 if (!f)
1221 return;
1222
1223 fprintf(f, "VM fault report.\n\n");
1224 if (os_get_command_line(cmd_line, sizeof(cmd_line)))
1225 fprintf(f, "Command: %s\n", cmd_line);
1226 fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
1227 fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
1228 fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
1229 fprintf(f, "Failing VM page: 0x%08"PRIx64"\n\n", addr);
1230
1231 if (sctx->apitrace_call_number)
1232 fprintf(f, "Last apitrace call: %u\n\n",
1233 sctx->apitrace_call_number);
1234
1235 switch (ring) {
1236 case RING_GFX: {
1237 struct u_log_context log;
1238 u_log_context_init(&log);
1239
1240 si_log_draw_state(sctx, &log);
1241 si_log_compute_state(sctx, &log);
1242 si_log_cs(sctx, &log, true);
1243
1244 u_log_new_page_print(&log, f);
1245 u_log_context_destroy(&log);
1246 break;
1247 }
1248 case RING_DMA:
1249 si_dump_dma(sctx, saved, f);
1250 break;
1251
1252 default:
1253 break;
1254 }
1255
1256 fclose(f);
1257
1258 fprintf(stderr, "Detected a VM fault, exiting...\n");
1259 exit(0);
1260 }
1261
1262 void si_init_debug_functions(struct si_context *sctx)
1263 {
1264 sctx->b.dump_debug_state = si_dump_debug_state;
1265
1266 /* Set the initial dmesg timestamp for this context, so that
1267 * only new messages will be checked for VM faults.
1268 */
1269 if (sctx->screen->debug_flags & DBG(CHECK_VM))
1270 ac_vm_fault_occured(sctx->chip_class,
1271 &sctx->dmesg_timestamp, NULL);
1272 }