radeonsi: move gfx fence wait out of si_check_vm_faults
[mesa.git] src/gallium/drivers/radeonsi/si_debug.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"
#include "sid_tables.h"
#include "radeon/radeon_elf_util.h"
#include "ddebug/dd_util.h"
#include "util/u_memory.h"

DEBUG_GET_ONCE_OPTION(replace_shaders, "RADEON_REPLACE_SHADERS", NULL)

static void si_dump_shader(struct si_screen *sscreen,
			   struct si_shader_ctx_state *state, FILE *f)
{
	if (!state->cso || !state->current)
		return;

	si_dump_shader_key(state->cso->type, &state->current->key, f);
	si_shader_dump(sscreen, state->current, NULL,
		       state->cso->info.processor, f);
}

/**
 * Shader compiles can be overridden with arbitrary ELF objects by setting
 * the environment variable RADEON_REPLACE_SHADERS=num1:filename1[;num2:filename2]
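 *
 * A purely illustrative example (the indices and file names below are made
 * up):
 *
 *   RADEON_REPLACE_SHADERS=2:/tmp/vs.elf;5:/tmp/ps.elf
 *
 * would replace the shaders whose compile counter is 2 and 5 with the given
 * pre-built ELF binaries.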
 */
bool si_replace_shader(unsigned num, struct radeon_shader_binary *binary)
{
	const char *p = debug_get_option_replace_shaders();
	const char *semicolon;
	char *copy = NULL;
	FILE *f;
	long filesize, nread;
	char *buf = NULL;
	bool replaced = false;

	if (!p)
		return false;

	while (*p) {
		unsigned long i;
		char *endp;
		i = strtoul(p, &endp, 0);

		p = endp;
		if (*p != ':') {
			fprintf(stderr, "RADEON_REPLACE_SHADERS formatted badly.\n");
			exit(1);
		}
		++p;

		if (i == num)
			break;

		p = strchr(p, ';');
		if (!p)
			return false;
		++p;
	}
	if (!*p)
		return false;

	semicolon = strchr(p, ';');
	if (semicolon) {
		p = copy = strndup(p, semicolon - p);
		if (!copy) {
			fprintf(stderr, "out of memory\n");
			return false;
		}
	}

	fprintf(stderr, "radeonsi: replace shader %u by %s\n", num, p);

	f = fopen(p, "r");
	if (!f) {
		perror("radeonsi: failed to open file");
		goto out_free;
	}

	if (fseek(f, 0, SEEK_END) != 0)
		goto file_error;

	filesize = ftell(f);
	if (filesize < 0)
		goto file_error;

	if (fseek(f, 0, SEEK_SET) != 0)
		goto file_error;

	buf = MALLOC(filesize);
	if (!buf) {
		fprintf(stderr, "out of memory\n");
		goto out_close;
	}

	nread = fread(buf, 1, filesize, f);
	if (nread != filesize)
		goto file_error;

	radeon_elf_read(buf, filesize, binary);
	replaced = true;

out_close:
	fclose(f);
out_free:
	FREE(buf);
	free(copy);
	return replaced;

file_error:
	perror("radeonsi: reading shader");
	goto out_close;
}

/* Parsed IBs are difficult to read without colors. Use "less -R file" to
 * read them, or use "aha -b -f file" to convert them to html.
 */
#define COLOR_RESET	"\033[0m"
#define COLOR_RED	"\033[31m"
#define COLOR_GREEN	"\033[1;32m"
#define COLOR_YELLOW	"\033[1;33m"
#define COLOR_CYAN	"\033[1;36m"

#define INDENT_PKT 8

static void print_spaces(FILE *f, unsigned num)
{
	fprintf(f, "%*s", num, "");
}

static void print_value(FILE *file, uint32_t value, int bits)
{
	/* Guess if it's int or float */
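	/* For instance (just the heuristic's behavior, not a spec): the value
	 * 5 prints as "5", while 0x3f800000 is assumed to be a float and
	 * prints as "1.0f (0x3f800000)".
	 */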
	if (value <= (1 << 15)) {
		if (value <= 9)
			fprintf(file, "%u\n", value);
		else
			fprintf(file, "%u (0x%0*x)\n", value, bits / 4, value);
	} else {
		float f = uif(value);

		if (fabs(f) < 100000 && f*10 == floor(f*10))
			fprintf(file, "%.1ff (0x%0*x)\n", f, bits / 4, value);
		else
			/* Don't print more leading zeros than there are bits. */
			fprintf(file, "0x%0*x\n", bits / 4, value);
	}
}

static void print_named_value(FILE *file, const char *name, uint32_t value,
			      int bits)
{
	print_spaces(file, INDENT_PKT);
	fprintf(file, COLOR_YELLOW "%s" COLOR_RESET " <- ", name);
	print_value(file, value, bits);
}

static void si_dump_reg(FILE *file, unsigned offset, uint32_t value,
			uint32_t field_mask)
{
	int r, f;

	for (r = 0; r < ARRAY_SIZE(sid_reg_table); r++) {
		const struct si_reg *reg = &sid_reg_table[r];
		const char *reg_name = sid_strings + reg->name_offset;

		if (reg->offset == offset) {
			bool first_field = true;

			print_spaces(file, INDENT_PKT);
			fprintf(file, COLOR_YELLOW "%s" COLOR_RESET " <- ",
				reg_name);

			if (!reg->num_fields) {
				print_value(file, value, 32);
				return;
			}

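			/* Decode each field: mask out its bits and shift them
			 * down to bit 0 (ffs gives the 1-based position of the
			 * mask's lowest set bit).
			 */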
			for (f = 0; f < reg->num_fields; f++) {
				const struct si_field *field = sid_fields_table + reg->fields_offset + f;
				const int *values_offsets = sid_strings_offsets + field->values_offset;
				uint32_t val = (value & field->mask) >>
					       (ffs(field->mask) - 1);

				if (!(field->mask & field_mask))
					continue;

				/* Indent the field. */
				if (!first_field)
					print_spaces(file,
						     INDENT_PKT + strlen(reg_name) + 4);

				/* Print the field. */
				fprintf(file, "%s = ", sid_strings + field->name_offset);

				if (val < field->num_values && values_offsets[val] >= 0)
					fprintf(file, "%s\n", sid_strings + values_offsets[val]);
				else
					print_value(file, val,
						    util_bitcount(field->mask));

				first_field = false;
			}
			return;
		}
	}

	fprintf(file, COLOR_YELLOW "0x%05x" COLOR_RESET " = 0x%08x\n", offset, value);
}

static void si_parse_set_reg_packet(FILE *f, uint32_t *ib, unsigned count,
				    unsigned reg_offset)
{
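	/* ib[1] holds the register offset in dwords, relative to the start of
	 * the register space selected by the packet type; shifting it left by
	 * 2 turns it into the byte offset that si_dump_reg expects before the
	 * space's base (reg_offset) is added.
	 */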
	unsigned reg = (ib[1] << 2) + reg_offset;
	int i;

	for (i = 0; i < count; i++)
		si_dump_reg(f, reg + i*4, ib[2+i], ~0);
}

static uint32_t *si_parse_packet3(FILE *f, uint32_t *ib, int *num_dw,
				  int trace_id)
{
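	/* The COUNT field of a type-3 header is the number of body dwords
	 * minus one, so the whole packet occupies count + 2 dwords
	 * (header + body); the ib/num_dw adjustment at the end of this
	 * function relies on that.
	 */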
	unsigned count = PKT_COUNT_G(ib[0]);
	unsigned op = PKT3_IT_OPCODE_G(ib[0]);
	const char *predicate = PKT3_PREDICATE(ib[0]) ? "(predicate)" : "";
	int i;

	/* Print the name first. */
	for (i = 0; i < ARRAY_SIZE(packet3_table); i++)
		if (packet3_table[i].op == op)
			break;

	if (i < ARRAY_SIZE(packet3_table)) {
		const char *name = sid_strings + packet3_table[i].name_offset;

		if (op == PKT3_SET_CONTEXT_REG ||
		    op == PKT3_SET_CONFIG_REG ||
		    op == PKT3_SET_UCONFIG_REG ||
		    op == PKT3_SET_SH_REG)
			fprintf(f, COLOR_CYAN "%s%s" COLOR_RESET ":\n",
				name, predicate);
		else
			fprintf(f, COLOR_GREEN "%s%s" COLOR_RESET ":\n",
				name, predicate);
	} else
		fprintf(f, COLOR_RED "PKT3_UNKNOWN 0x%x%s" COLOR_RESET ":\n",
			op, predicate);

	/* Print the contents. */
	switch (op) {
	case PKT3_SET_CONTEXT_REG:
		si_parse_set_reg_packet(f, ib, count, SI_CONTEXT_REG_OFFSET);
		break;
	case PKT3_SET_CONFIG_REG:
		si_parse_set_reg_packet(f, ib, count, SI_CONFIG_REG_OFFSET);
		break;
	case PKT3_SET_UCONFIG_REG:
		si_parse_set_reg_packet(f, ib, count, CIK_UCONFIG_REG_OFFSET);
		break;
	case PKT3_SET_SH_REG:
		si_parse_set_reg_packet(f, ib, count, SI_SH_REG_OFFSET);
		break;
	case PKT3_DRAW_PREAMBLE:
		si_dump_reg(f, R_030908_VGT_PRIMITIVE_TYPE, ib[1], ~0);
		si_dump_reg(f, R_028AA8_IA_MULTI_VGT_PARAM, ib[2], ~0);
		si_dump_reg(f, R_028B58_VGT_LS_HS_CONFIG, ib[3], ~0);
		break;
	case PKT3_ACQUIRE_MEM:
		si_dump_reg(f, R_0301F0_CP_COHER_CNTL, ib[1], ~0);
		si_dump_reg(f, R_0301F4_CP_COHER_SIZE, ib[2], ~0);
		si_dump_reg(f, R_030230_CP_COHER_SIZE_HI, ib[3], ~0);
		si_dump_reg(f, R_0301F8_CP_COHER_BASE, ib[4], ~0);
		si_dump_reg(f, R_0301E4_CP_COHER_BASE_HI, ib[5], ~0);
		print_named_value(f, "POLL_INTERVAL", ib[6], 16);
		break;
	case PKT3_SURFACE_SYNC:
		si_dump_reg(f, R_0085F0_CP_COHER_CNTL, ib[1], ~0);
		si_dump_reg(f, R_0085F4_CP_COHER_SIZE, ib[2], ~0);
		si_dump_reg(f, R_0085F8_CP_COHER_BASE, ib[3], ~0);
		print_named_value(f, "POLL_INTERVAL", ib[4], 16);
		break;
	case PKT3_EVENT_WRITE:
		si_dump_reg(f, R_028A90_VGT_EVENT_INITIATOR, ib[1],
			    S_028A90_EVENT_TYPE(~0));
		print_named_value(f, "EVENT_INDEX", (ib[1] >> 8) & 0xf, 4);
		print_named_value(f, "INV_L2", (ib[1] >> 20) & 0x1, 1);
		if (count > 0) {
			print_named_value(f, "ADDRESS_LO", ib[2], 32);
			print_named_value(f, "ADDRESS_HI", ib[3], 16);
		}
		break;
	case PKT3_DRAW_INDEX_AUTO:
		si_dump_reg(f, R_030930_VGT_NUM_INDICES, ib[1], ~0);
		si_dump_reg(f, R_0287F0_VGT_DRAW_INITIATOR, ib[2], ~0);
		break;
	case PKT3_DRAW_INDEX_2:
		si_dump_reg(f, R_028A78_VGT_DMA_MAX_SIZE, ib[1], ~0);
		si_dump_reg(f, R_0287E8_VGT_DMA_BASE, ib[2], ~0);
		si_dump_reg(f, R_0287E4_VGT_DMA_BASE_HI, ib[3], ~0);
		si_dump_reg(f, R_030930_VGT_NUM_INDICES, ib[4], ~0);
		si_dump_reg(f, R_0287F0_VGT_DRAW_INITIATOR, ib[5], ~0);
		break;
	case PKT3_INDEX_TYPE:
		si_dump_reg(f, R_028A7C_VGT_DMA_INDEX_TYPE, ib[1], ~0);
		break;
	case PKT3_NUM_INSTANCES:
		si_dump_reg(f, R_030934_VGT_NUM_INSTANCES, ib[1], ~0);
		break;
	case PKT3_WRITE_DATA:
		si_dump_reg(f, R_370_CONTROL, ib[1], ~0);
		si_dump_reg(f, R_371_DST_ADDR_LO, ib[2], ~0);
		si_dump_reg(f, R_372_DST_ADDR_HI, ib[3], ~0);
		for (i = 2; i < count; i++) {
			print_spaces(f, INDENT_PKT);
			fprintf(f, "0x%08x\n", ib[2+i]);
		}
		break;
	case PKT3_CP_DMA:
		si_dump_reg(f, R_410_CP_DMA_WORD0, ib[1], ~0);
		si_dump_reg(f, R_411_CP_DMA_WORD1, ib[2], ~0);
		si_dump_reg(f, R_412_CP_DMA_WORD2, ib[3], ~0);
		si_dump_reg(f, R_413_CP_DMA_WORD3, ib[4], ~0);
		si_dump_reg(f, R_414_COMMAND, ib[5], ~0);
		break;
	case PKT3_DMA_DATA:
		si_dump_reg(f, R_500_DMA_DATA_WORD0, ib[1], ~0);
		si_dump_reg(f, R_501_SRC_ADDR_LO, ib[2], ~0);
		si_dump_reg(f, R_502_SRC_ADDR_HI, ib[3], ~0);
		si_dump_reg(f, R_503_DST_ADDR_LO, ib[4], ~0);
		si_dump_reg(f, R_504_DST_ADDR_HI, ib[5], ~0);
		si_dump_reg(f, R_414_COMMAND, ib[6], ~0);
		break;
	case PKT3_INDIRECT_BUFFER_SI:
	case PKT3_INDIRECT_BUFFER_CONST:
	case PKT3_INDIRECT_BUFFER_CIK:
		si_dump_reg(f, R_3F0_IB_BASE_LO, ib[1], ~0);
		si_dump_reg(f, R_3F1_IB_BASE_HI, ib[2], ~0);
		si_dump_reg(f, R_3F2_CONTROL, ib[3], ~0);
		break;
	case PKT3_NOP:
		if (ib[0] == 0xffff1000) {
			count = -1; /* One dword NOP. */
			break;
		} else if (count == 0 && SI_IS_TRACE_POINT(ib[1])) {
			unsigned packet_id = SI_GET_TRACE_POINT_ID(ib[1]);

			print_spaces(f, INDENT_PKT);
			fprintf(f, COLOR_RED "Trace point ID: %u" COLOR_RESET "\n", packet_id);

			if (trace_id == -1)
				break; /* tracing was disabled */

			print_spaces(f, INDENT_PKT);
			if (packet_id < trace_id)
				fprintf(f, COLOR_RED
					"This trace point was reached by the CP."
					COLOR_RESET "\n");
			else if (packet_id == trace_id)
				fprintf(f, COLOR_RED
					"!!!!! This is the last trace point that "
					"was reached by the CP !!!!!"
					COLOR_RESET "\n");
			else if (packet_id == trace_id + 1)
				fprintf(f, COLOR_RED
					"!!!!! This is the first trace point that "
					"was NOT reached by the CP !!!!!"
					COLOR_RESET "\n");
			else
				fprintf(f, COLOR_RED
					"!!!!! This trace point was NOT reached "
					"by the CP !!!!!"
					COLOR_RESET "\n");
			break;
		}
		/* fall through, print all dwords */
	default:
		for (i = 0; i < count+1; i++) {
			print_spaces(f, INDENT_PKT);
			fprintf(f, "0x%08x\n", ib[1+i]);
		}
	}

	ib += count + 2;
	*num_dw -= count + 2;
	return ib;
}

/**
 * Parse and print an IB into a file.
 *
 * \param f        file
 * \param ib       IB
 * \param num_dw   size of the IB
 * \param trace_id the last trace ID that is known to have been reached
 *                 and executed by the CP, typically read from a buffer
 * \param name     a label for the IB, printed in its header and footer lines
 */
static void si_parse_ib(FILE *f, uint32_t *ib, int num_dw, int trace_id,
			const char *name)
{
	fprintf(f, "------------------ %s begin ------------------\n", name);

	while (num_dw > 0) {
		unsigned type = PKT_TYPE_G(ib[0]);

		switch (type) {
		case 3:
			ib = si_parse_packet3(f, ib, &num_dw, trace_id);
			break;
		case 2:
			/* type-2 nop */
			if (ib[0] == 0x80000000) {
				fprintf(f, COLOR_GREEN "NOP (type 2)" COLOR_RESET "\n");
				ib++;
				num_dw--;
				break;
			}
			/* fall through */
		default:
			fprintf(f, "Unknown packet type %i\n", type);
			return;
		}
	}

	fprintf(f, "------------------- %s end -------------------\n", name);
	if (num_dw < 0) {
		fprintf(f, "Packet ends after the end of IB.\n");
		exit(0);
	}
	fprintf(f, "\n");
}

static void si_dump_mmapped_reg(struct si_context *sctx, FILE *f,
				unsigned offset)
{
	struct radeon_winsys *ws = sctx->b.ws;
	uint32_t value;

	if (ws->read_registers(ws, offset, 1, &value))
		si_dump_reg(f, offset, value, ~0);
}

static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
{
	if (sctx->screen->b.info.drm_major == 2 &&
	    sctx->screen->b.info.drm_minor < 42)
		return; /* no radeon support */

	fprintf(f, "Memory-mapped registers:\n");
	si_dump_mmapped_reg(sctx, f, R_008010_GRBM_STATUS);

	/* No other registers can be read on DRM < 3.1.0. */
	if (sctx->screen->b.info.drm_major < 3 ||
	    sctx->screen->b.info.drm_minor < 1) {
		fprintf(f, "\n");
		return;
	}

	si_dump_mmapped_reg(sctx, f, R_008008_GRBM_STATUS2);
	si_dump_mmapped_reg(sctx, f, R_008014_GRBM_STATUS_SE0);
	si_dump_mmapped_reg(sctx, f, R_008018_GRBM_STATUS_SE1);
	si_dump_mmapped_reg(sctx, f, R_008038_GRBM_STATUS_SE2);
	si_dump_mmapped_reg(sctx, f, R_00803C_GRBM_STATUS_SE3);
	si_dump_mmapped_reg(sctx, f, R_00D034_SDMA0_STATUS_REG);
	si_dump_mmapped_reg(sctx, f, R_00D834_SDMA1_STATUS_REG);
	si_dump_mmapped_reg(sctx, f, R_000E50_SRBM_STATUS);
	si_dump_mmapped_reg(sctx, f, R_000E4C_SRBM_STATUS2);
	si_dump_mmapped_reg(sctx, f, R_000E54_SRBM_STATUS3);
	si_dump_mmapped_reg(sctx, f, R_008680_CP_STAT);
	si_dump_mmapped_reg(sctx, f, R_008674_CP_STALLED_STAT1);
	si_dump_mmapped_reg(sctx, f, R_008678_CP_STALLED_STAT2);
	si_dump_mmapped_reg(sctx, f, R_008670_CP_STALLED_STAT3);
	si_dump_mmapped_reg(sctx, f, R_008210_CP_CPC_STATUS);
	si_dump_mmapped_reg(sctx, f, R_008214_CP_CPC_BUSY_STAT);
	si_dump_mmapped_reg(sctx, f, R_008218_CP_CPC_STALLED_STAT1);
	si_dump_mmapped_reg(sctx, f, R_00821C_CP_CPF_STATUS);
	si_dump_mmapped_reg(sctx, f, R_008220_CP_CPF_BUSY_STAT);
	si_dump_mmapped_reg(sctx, f, R_008224_CP_CPF_STALLED_STAT1);
	fprintf(f, "\n");
}

static void si_dump_last_ib(struct si_context *sctx, FILE *f)
{
	int last_trace_id = -1;

	if (!sctx->last_gfx.ib)
		return;

	if (sctx->last_trace_buf) {
		/* We are expecting that the ddebug pipe has already
		 * waited for the context, so this buffer should be idle.
		 * If the GPU is hung, there is no point in waiting for it.
		 */
		uint32_t *map = sctx->b.ws->buffer_map(sctx->last_trace_buf->buf,
						       NULL,
						       PIPE_TRANSFER_UNSYNCHRONIZED |
						       PIPE_TRANSFER_READ);
		if (map)
			last_trace_id = *map;
	}

	if (sctx->init_config)
		si_parse_ib(f, sctx->init_config->pm4, sctx->init_config->ndw,
			    -1, "IB2: Init config");

	if (sctx->init_config_gs_rings)
		si_parse_ib(f, sctx->init_config_gs_rings->pm4,
			    sctx->init_config_gs_rings->ndw,
			    -1, "IB2: Init GS rings");

	si_parse_ib(f, sctx->last_gfx.ib, sctx->last_gfx.num_dw,
		    last_trace_id, "IB");
}

static const char *priority_to_string(enum radeon_bo_priority priority)
{
#define ITEM(x) [RADEON_PRIO_##x] = #x
	static const char *table[64] = {
		ITEM(FENCE),
		ITEM(TRACE),
		ITEM(SO_FILLED_SIZE),
		ITEM(QUERY),
		ITEM(IB1),
		ITEM(IB2),
		ITEM(DRAW_INDIRECT),
		ITEM(INDEX_BUFFER),
		ITEM(CP_DMA),
		ITEM(VCE),
		ITEM(UVD),
		ITEM(SDMA_BUFFER),
		ITEM(SDMA_TEXTURE),
		ITEM(USER_SHADER),
		ITEM(INTERNAL_SHADER),
		ITEM(CONST_BUFFER),
		ITEM(DESCRIPTORS),
		ITEM(BORDER_COLORS),
		ITEM(SAMPLER_BUFFER),
		ITEM(VERTEX_BUFFER),
		ITEM(SHADER_RW_BUFFER),
		ITEM(RINGS_STREAMOUT),
		ITEM(SCRATCH_BUFFER),
		ITEM(COMPUTE_GLOBAL),
		ITEM(SAMPLER_TEXTURE),
		ITEM(SHADER_RW_IMAGE),
		ITEM(SAMPLER_TEXTURE_MSAA),
		ITEM(COLOR_BUFFER),
		ITEM(DEPTH_BUFFER),
		ITEM(COLOR_BUFFER_MSAA),
		ITEM(DEPTH_BUFFER_MSAA),
		ITEM(CMASK),
		ITEM(DCC),
		ITEM(HTILE),
	};
#undef ITEM

	assert(priority < ARRAY_SIZE(table));
	return table[priority];
}

static int bo_list_compare_va(const struct radeon_bo_list_item *a,
			      const struct radeon_bo_list_item *b)
{
	return a->vm_address < b->vm_address ? -1 :
	       a->vm_address > b->vm_address ? 1 : 0;
}

static void si_dump_bo_list(struct si_context *sctx,
			    const struct radeon_saved_cs *saved, FILE *f)
{
	unsigned i, j;

	if (!saved->bo_list)
		return;

	/* Sort the list according to VM addresses first. */
	qsort(saved->bo_list, saved->bo_count,
	      sizeof(saved->bo_list[0]), (void*)bo_list_compare_va);

	fprintf(f, "Buffer list (in units of pages = 4kB):\n"
		COLOR_YELLOW " Size VM start page "
		"VM end page Usage" COLOR_RESET "\n");

	for (i = 0; i < saved->bo_count; i++) {
		/* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
		const unsigned page_size = sctx->b.screen->info.gart_page_size;
		uint64_t va = saved->bo_list[i].vm_address;
		uint64_t size = saved->bo_list[i].buf->size;
		bool hit = false;

		/* If there's unused virtual memory between 2 buffers, print it. */
		if (i) {
			uint64_t previous_va_end = saved->bo_list[i-1].vm_address +
						   saved->bo_list[i-1].buf->size;

			if (va > previous_va_end) {
				fprintf(f, " %10"PRIu64" -- hole --\n",
					(va - previous_va_end) / page_size);
			}
		}

		/* Print the buffer. */
		fprintf(f, " %10"PRIu64" 0x%013"PRIx64" 0x%013"PRIx64" ",
			size / page_size, va / page_size, (va + size) / page_size);

		/* Print the usage. */
		for (j = 0; j < 64; j++) {
			if (!(saved->bo_list[i].priority_usage & (1llu << j)))
				continue;

			fprintf(f, "%s%s", !hit ? "" : ", ", priority_to_string(j));
			hit = true;
		}
		fprintf(f, "\n");
	}
	fprintf(f, "\nNote: The holes represent memory not used by the IB.\n"
		   " Other buffers can still be allocated there.\n\n");
}

static void si_dump_framebuffer(struct si_context *sctx, FILE *f)
{
	struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
	struct r600_texture *rtex;
	int i;

	for (i = 0; i < state->nr_cbufs; i++) {
		if (!state->cbufs[i])
			continue;

		rtex = (struct r600_texture*)state->cbufs[i]->texture;
		fprintf(f, COLOR_YELLOW "Color buffer %i:" COLOR_RESET "\n", i);
		r600_print_texture_info(rtex, f);
		fprintf(f, "\n");
	}

	if (state->zsbuf) {
		rtex = (struct r600_texture*)state->zsbuf->texture;
		fprintf(f, COLOR_YELLOW "Depth-stencil buffer:" COLOR_RESET "\n");
		r600_print_texture_info(rtex, f);
		fprintf(f, "\n");
	}
}

static void si_dump_debug_state(struct pipe_context *ctx, FILE *f,
				unsigned flags)
{
	struct si_context *sctx = (struct si_context*)ctx;

	if (flags & PIPE_DEBUG_DEVICE_IS_HUNG)
		si_dump_debug_registers(sctx, f);

	si_dump_framebuffer(sctx, f);
	si_dump_shader(sctx->screen, &sctx->vs_shader, f);
	si_dump_shader(sctx->screen, &sctx->tcs_shader, f);
	si_dump_shader(sctx->screen, &sctx->tes_shader, f);
	si_dump_shader(sctx->screen, &sctx->gs_shader, f);
	si_dump_shader(sctx->screen, &sctx->ps_shader, f);

	si_dump_bo_list(sctx, &sctx->last_gfx, f);
	si_dump_last_ib(sctx, f);

	fprintf(f, "Done.\n");

	/* dump only once */
	radeon_clear_saved_cs(&sctx->last_gfx);
	r600_resource_reference(&sctx->last_trace_buf, NULL);
}

static bool si_vm_fault_occured(struct si_context *sctx, uint32_t *out_addr)
{
	char line[2000];
	unsigned sec, usec;
	int progress = 0;
	uint64_t timestamp = 0;
	bool fault = false;

	FILE *p = popen("dmesg", "r");
	if (!p)
		return false;

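	/* Scan the kernel log. The lines we care about look roughly like the
	 * following (the exact wording depends on the kernel driver and
	 * version, so treat these as illustrative only):
	 *
	 *   [  128.156034] radeon 0000:01:00.0: GPU fault detected: ...
	 *   [  128.156039] VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x00084000
	 */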
	while (fgets(line, sizeof(line), p)) {
		char *msg;
		int len;

		if (!line[0] || line[0] == '\n')
			continue;

		/* Get the timestamp. */
		if (sscanf(line, "[%u.%u]", &sec, &usec) != 2) {
			assert(0);
			continue;
		}
		timestamp = sec * 1000000llu + usec;

		/* If just updating the timestamp. */
		if (!out_addr)
			continue;

		/* Process messages only if the timestamp is newer. */
		if (timestamp <= sctx->dmesg_timestamp)
			continue;

		/* Only process the first VM fault. */
		if (fault)
			continue;

		/* Remove trailing \n */
		len = strlen(line);
		if (len && line[len-1] == '\n')
			line[len-1] = 0;

		/* Get the message part. */
		msg = strchr(line, ']');
		if (!msg) {
			assert(0);
			continue;
		}
		msg++;

		switch (progress) {
		case 0:
			if (strstr(msg, "GPU fault detected:"))
				progress = 1;
			break;
		case 1:
			msg = strstr(msg, "VM_CONTEXT1_PROTECTION_FAULT_ADDR");
			if (msg) {
				msg = strstr(msg, "0x");
				if (msg) {
					msg += 2;
					if (sscanf(msg, "%X", out_addr) == 1)
						fault = true;
				}
			}
			progress = 0;
			break;
		default:
			progress = 0;
		}
	}
	pclose(p);

	if (timestamp > sctx->dmesg_timestamp)
		sctx->dmesg_timestamp = timestamp;
	return fault;
}

void si_check_vm_faults(struct si_context *sctx)
{
	struct pipe_screen *screen = sctx->b.b.screen;
	FILE *f;
	uint32_t addr;

	if (!si_vm_fault_occured(sctx, &addr))
		return;

	f = dd_get_debug_file(false);
	if (!f)
		return;

	fprintf(f, "VM fault report.\n\n");
	fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
	fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
	fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
	fprintf(f, "Failing VM page: 0x%08x\n\n", addr);

	si_dump_debug_state(&sctx->b.b, f, 0);
	fclose(f);

	fprintf(stderr, "Detected a VM fault, exiting...\n");
	exit(0);
}

void si_init_debug_functions(struct si_context *sctx)
{
	sctx->b.b.dump_debug_state = si_dump_debug_state;

	/* Set the initial dmesg timestamp for this context, so that
	 * only new messages will be checked for VM faults.
	 */
	if (sctx->screen->b.debug_flags & DBG_CHECK_VM)
		si_vm_fault_occured(sctx, NULL);
}