+ const struct radeon_bo_list_item *b)
+{
+ return a->vm_address < b->vm_address ? -1 : a->vm_address > b->vm_address ? 1 : 0;
+}
+
/* Print the buffer list of a captured command stream to "f".
 *
 * The list is sorted in place by VM address so that unused gaps ("holes")
 * between consecutive buffers can be reported.  All sizes and addresses are
 * printed in units of GART pages (the winsys aligns buffer sizes to 4k,
 * see the note inside the loop).
 */
static void si_dump_bo_list(struct si_context *sctx, const struct radeon_saved_cs *saved, FILE *f)
{
   unsigned i, j;

   if (!saved->bo_list)
      return;

   /* Sort the list according to VM addresses first.
    * The (void *) cast adapts bo_list_compare_va's typed-pointer signature
    * to qsort's (const void *, const void *) comparator type. */
   qsort(saved->bo_list, saved->bo_count, sizeof(saved->bo_list[0]), (void *)bo_list_compare_va);

   fprintf(f, "Buffer list (in units of pages = 4kB):\n" COLOR_YELLOW
              " Size VM start page "
              "VM end page Usage" COLOR_RESET "\n");

   for (i = 0; i < saved->bo_count; i++) {
      /* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
      const unsigned page_size = sctx->screen->info.gart_page_size;
      uint64_t va = saved->bo_list[i].vm_address;
      uint64_t size = saved->bo_list[i].bo_size;
      bool hit = false;  /* whether any usage flag was printed yet */

      /* If there's unused virtual memory between 2 buffers, print it. */
      if (i) {
         uint64_t previous_va_end =
            saved->bo_list[i - 1].vm_address + saved->bo_list[i - 1].bo_size;

         if (va > previous_va_end) {
            fprintf(f, " %10" PRIu64 " -- hole --\n", (va - previous_va_end) / page_size);
         }
      }

      /* Print the buffer. */
      fprintf(f, " %10" PRIu64 " 0x%013" PRIX64 " 0x%013" PRIX64 " ",
              size / page_size, va / page_size, (va + size) / page_size);

      /* Print the usage: one name per set bit in priority_usage. */
      for (j = 0; j < 32; j++) {
         if (!(saved->bo_list[i].priority_usage & (1u << j)))
            continue;

         fprintf(f, "%s%s", !hit ? "" : ", ", priority_to_string(j));
         hit = true;
      }
      fprintf(f, "\n");
   }
   fprintf(f, "\nNote: The holes represent memory not used by the IB.\n"
              "      Other buffers can still be allocated there.\n\n");
}
+
+static void si_dump_framebuffer(struct si_context *sctx, struct u_log_context *log)
+{
+ struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
+ struct si_texture *tex;
+ int i;
+
+ for (i = 0; i < state->nr_cbufs; i++) {
+ if (!state->cbufs[i])
+ continue;
+
+ tex = (struct si_texture *)state->cbufs[i]->texture;
+ u_log_printf(log, COLOR_YELLOW "Color buffer %i:" COLOR_RESET "\n", i);
+ si_print_texture_info(sctx->screen, tex, log);
+ u_log_printf(log, "\n");
+ }
+
+ if (state->zsbuf) {
+ tex = (struct si_texture *)state->zsbuf->texture;
+ u_log_printf(log, COLOR_YELLOW "Depth-stencil buffer:" COLOR_RESET "\n");
+ si_print_texture_info(sctx->screen, tex, log);
+ u_log_printf(log, "\n");
+ }
+}
+
/* Maps a caller-visible slot index to the slot index used inside the
 * descriptor list. */
typedef unsigned (*slot_remap_func)(unsigned);

/* A u_log chunk holding a CPU snapshot of a descriptor list, plus an
 * optional mapping of the GPU-visible copy so the print callback can detect
 * corruption in GPU memory. */
struct si_log_chunk_desc_list {
   /** Pointer to memory map of buffer where the list is uploaded */
   uint32_t *gpu_list;
   /** Reference of buffer where the list is uploaded, so that gpu_list
    * is kept live. */
   struct si_resource *buf;

   const char *shader_name;      /* e.g. "VS", "PS" — used as a print prefix */
   const char *elem_name;        /* e.g. " - Sampler" — element kind label */
   slot_remap_func slot_remap;   /* maps CPU slot i to GPU slot index */
   enum chip_class chip_class;
   unsigned element_dw_size;     /* dwords per element: 4, 8 or 16 */
   unsigned num_elements;

   /* CPU snapshot of the list; trailing variable-length data allocated
    * together with the struct (zero-length-array idiom). */
   uint32_t list[0];
};
+
/* u_log chunk destructor: drop the buffer reference and free the chunk. */
static void si_log_chunk_desc_list_destroy(void *data)
{
   struct si_log_chunk_desc_list *chunk = data;
   /* Releasing buf also ends the lifetime guarantee for gpu_list. */
   si_resource_reference(&chunk->buf, NULL);
   FREE(chunk);
}
+
/* u_log chunk print callback: dump every element of a descriptor list,
 * decoding the dwords as the appropriate SQ_*_RSRC/SAMP registers.
 *
 * Element layouts by dword size:
 *   4  — buffer descriptor
 *   8  — image descriptor followed by a buffer view (dwords 4..7)
 *   16 — image + buffer view + FMASK (dwords 8..15... see below) + sampler
 * If a GPU copy is available, it is printed and compared against the CPU
 * snapshot to detect corruption.
 */
static void si_log_chunk_desc_list_print(void *data, FILE *f)
{
   struct si_log_chunk_desc_list *chunk = data;
   /* GFX10 moved the image resource registers to a new offset. */
   unsigned sq_img_rsrc_word0 =
      chunk->chip_class >= GFX10 ? R_00A000_SQ_IMG_RSRC_WORD0 : R_008F10_SQ_IMG_RSRC_WORD0;

   for (unsigned i = 0; i < chunk->num_elements; i++) {
      unsigned cpu_dw_offset = i * chunk->element_dw_size;
      /* The GPU list may store elements at remapped slots. */
      unsigned gpu_dw_offset = chunk->slot_remap(i) * chunk->element_dw_size;
      const char *list_note = chunk->gpu_list ? "GPU list" : "CPU list";
      uint32_t *cpu_list = chunk->list + cpu_dw_offset;
      /* Prefer the live GPU copy; fall back to the CPU snapshot. */
      uint32_t *gpu_list = chunk->gpu_list ? chunk->gpu_list + gpu_dw_offset : cpu_list;

      fprintf(f, COLOR_GREEN "%s%s slot %u (%s):" COLOR_RESET "\n", chunk->shader_name,
              chunk->elem_name, i, list_note);

      switch (chunk->element_dw_size) {
      case 4:
         for (unsigned j = 0; j < 4; j++)
            ac_dump_reg(f, chunk->chip_class, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[j],
                        0xffffffff);
         break;
      case 8:
         for (unsigned j = 0; j < 8; j++)
            ac_dump_reg(f, chunk->chip_class, sq_img_rsrc_word0 + j * 4, gpu_list[j], 0xffffffff);

         fprintf(f, COLOR_CYAN "    Buffer:" COLOR_RESET "\n");
         for (unsigned j = 0; j < 4; j++)
            ac_dump_reg(f, chunk->chip_class, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[4 + j],
                        0xffffffff);
         break;
      case 16:
         for (unsigned j = 0; j < 8; j++)
            ac_dump_reg(f, chunk->chip_class, sq_img_rsrc_word0 + j * 4, gpu_list[j], 0xffffffff);

         fprintf(f, COLOR_CYAN "    Buffer:" COLOR_RESET "\n");
         for (unsigned j = 0; j < 4; j++)
            ac_dump_reg(f, chunk->chip_class, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[4 + j],
                        0xffffffff);

         fprintf(f, COLOR_CYAN "    FMASK:" COLOR_RESET "\n");
         for (unsigned j = 0; j < 8; j++)
            ac_dump_reg(f, chunk->chip_class, sq_img_rsrc_word0 + j * 4, gpu_list[8 + j],
                        0xffffffff);

         fprintf(f, COLOR_CYAN "    Sampler state:" COLOR_RESET "\n");
         for (unsigned j = 0; j < 4; j++)
            ac_dump_reg(f, chunk->chip_class, R_008F30_SQ_IMG_SAMP_WORD0 + j * 4, gpu_list[12 + j],
                        0xffffffff);
         break;
      }

      /* element_dw_size * 4 == element size in bytes. */
      if (memcmp(gpu_list, cpu_list, chunk->element_dw_size * 4) != 0) {
         fprintf(f, COLOR_RED "!!!!! This slot was corrupted in GPU memory !!!!!" COLOR_RESET "\n");
      }

      fprintf(f, "\n");
   }
}
+
/* u_log vtable for descriptor-list chunks created by si_dump_descriptor_list. */
static const struct u_log_chunk_type si_log_chunk_type_descriptor_list = {
   .destroy = si_log_chunk_desc_list_destroy,
   .print = si_log_chunk_desc_list_print,
};
+
+static void si_dump_descriptor_list(struct si_screen *screen, struct si_descriptors *desc,
+ const char *shader_name, const char *elem_name,
+ unsigned element_dw_size, unsigned num_elements,
+ slot_remap_func slot_remap, struct u_log_context *log)
+{
+ if (!desc->list)
+ return;
+
+ /* In some cases, the caller doesn't know how many elements are really
+ * uploaded. Reduce num_elements to fit in the range of active slots. */
+ unsigned active_range_dw_begin = desc->first_active_slot * desc->element_dw_size;
+ unsigned active_range_dw_end =
+ active_range_dw_begin + desc->num_active_slots * desc->element_dw_size;
+
+ while (num_elements > 0) {
+ int i = slot_remap(num_elements - 1);
+ unsigned dw_begin = i * element_dw_size;
+ unsigned dw_end = dw_begin + element_dw_size;
+
+ if (dw_begin >= active_range_dw_begin && dw_end <= active_range_dw_end)
+ break;
+
+ num_elements--;
+ }
+
+ struct si_log_chunk_desc_list *chunk =
+ CALLOC_VARIANT_LENGTH_STRUCT(si_log_chunk_desc_list, 4 * element_dw_size * num_elements);
+ chunk->shader_name = shader_name;
+ chunk->elem_name = elem_name;
+ chunk->element_dw_size = element_dw_size;
+ chunk->num_elements = num_elements;
+ chunk->slot_remap = slot_remap;
+ chunk->chip_class = screen->info.chip_class;
+
+ si_resource_reference(&chunk->buf, desc->buffer);
+ chunk->gpu_list = desc->gpu_list;
+
+ for (unsigned i = 0; i < num_elements; ++i) {
+ memcpy(&chunk->list[i * element_dw_size], &desc->list[slot_remap(i) * element_dw_size],
+ 4 * element_dw_size);
+ }
+
+ u_log_chunk(log, &si_log_chunk_type_descriptor_list, chunk);
+}
+
/* Trivial slot_remap_func: every slot maps to itself. */
static unsigned si_identity(unsigned slot)
{
   return slot;
}
+
+static void si_dump_descriptors(struct si_context *sctx, gl_shader_stage stage,
+ const struct si_shader_info *info, struct u_log_context *log)
+{
+ enum pipe_shader_type processor = pipe_shader_type_from_mesa(stage);
+ struct si_descriptors *descs =
+ &sctx->descriptors[SI_DESCS_FIRST_SHADER + processor * SI_NUM_SHADER_DESCS];
+ static const char *shader_name[] = {"VS", "PS", "GS", "TCS", "TES", "CS"};
+ const char *name = shader_name[processor];
+ unsigned enabled_constbuf, enabled_shaderbuf, enabled_samplers;
+ unsigned enabled_images;
+
+ if (info) {
+ enabled_constbuf = u_bit_consecutive(0, info->base.num_ubos);
+ enabled_shaderbuf = u_bit_consecutive(0, info->base.num_ssbos);
+ enabled_samplers = info->base.textures_used;
+ enabled_images = u_bit_consecutive(0, info->base.num_images);
+ } else {
+ enabled_constbuf =
+ sctx->const_and_shader_buffers[processor].enabled_mask >> SI_NUM_SHADER_BUFFERS;
+ enabled_shaderbuf = sctx->const_and_shader_buffers[processor].enabled_mask &
+ u_bit_consecutive64(0, SI_NUM_SHADER_BUFFERS);
+ enabled_shaderbuf = 0;
+ for (int i = 0; i < SI_NUM_SHADER_BUFFERS; i++) {
+ enabled_shaderbuf |=
+ (sctx->const_and_shader_buffers[processor].enabled_mask &
+ 1llu << (SI_NUM_SHADER_BUFFERS - i - 1)) << i;
+ }
+ enabled_samplers = sctx->samplers[processor].enabled_mask;
+ enabled_images = sctx->images[processor].enabled_mask;
+ }
+
+ if (stage == MESA_SHADER_VERTEX && sctx->vb_descriptors_buffer &&
+ sctx->vb_descriptors_gpu_list && sctx->vertex_elements) {
+ assert(info); /* only CS may not have an info struct */
+ struct si_descriptors desc = {};
+
+ desc.buffer = sctx->vb_descriptors_buffer;
+ desc.list = sctx->vb_descriptors_gpu_list;
+ desc.gpu_list = sctx->vb_descriptors_gpu_list;
+ desc.element_dw_size = 4;
+ desc.num_active_slots = sctx->vertex_elements->vb_desc_list_alloc_size / 16;
+
+ si_dump_descriptor_list(sctx->screen, &desc, name, " - Vertex buffer", 4, info->num_inputs,
+ si_identity, log);
+ }
+
+ si_dump_descriptor_list(sctx->screen, &descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS], name,
+ " - Constant buffer", 4, util_last_bit(enabled_constbuf),
+ si_get_constbuf_slot, log);
+ si_dump_descriptor_list(sctx->screen, &descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS], name,
+ " - Shader buffer", 4, util_last_bit(enabled_shaderbuf),
+ si_get_shaderbuf_slot, log);
+ si_dump_descriptor_list(sctx->screen, &descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES], name,
+ " - Sampler", 16, util_last_bit(enabled_samplers), si_get_sampler_slot,
+ log);
+ si_dump_descriptor_list(sctx->screen, &descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES], name,
+ " - Image", 8, util_last_bit(enabled_images), si_get_image_slot, log);
+}
+
+static void si_dump_gfx_descriptors(struct si_context *sctx,
+ const struct si_shader_ctx_state *state,
+ struct u_log_context *log)
+{
+ if (!state->cso || !state->current)
+ return;
+
+ si_dump_descriptors(sctx, state->cso->info.stage, &state->cso->info, log);
+}
+
+static void si_dump_compute_descriptors(struct si_context *sctx, struct u_log_context *log)
+{
+ if (!sctx->cs_shader_state.program)
+ return;
+
+ si_dump_descriptors(sctx, MESA_SHADER_COMPUTE, NULL, log);
+}
+
/* One disassembled shader instruction, as split out of the ".AMDGPU.disasm"
 * section by si_add_split_disasm(). */
struct si_shader_inst {
   const char *text; /* start of disassembly for this instruction (points into
                      * the disasm section; not NUL-terminated, use textlen) */
   unsigned textlen; /* length of the disassembly text in bytes */
   unsigned size;    /* instruction size = 4 or 8 */
   uint64_t addr;    /* instruction address */
};
+
/**
 * Open the given \p binary as \p rtld_binary and split the contained
 * disassembly string into instructions and add them to the array
 * pointed to by \p instructions, which must be sufficiently large.
 *
 * Labels are considered to be part of the following instruction.
 *
 * \p addr is the running instruction address: it is read as the address of
 * the first instruction and advanced past every instruction added, so
 * consecutive calls lay shader parts out back to back. \p num is the
 * running instruction count, incremented per instruction.
 *
 * The caller must keep \p rtld_binary alive as long as \p instructions are
 * used and then close it afterwards.
 */
static void si_add_split_disasm(struct si_screen *screen, struct ac_rtld_binary *rtld_binary,
                                struct si_shader_binary *binary, uint64_t *addr, unsigned *num,
                                struct si_shader_inst *instructions,
                                gl_shader_stage stage, unsigned wave_size)
{
   if (!ac_rtld_open(rtld_binary, (struct ac_rtld_open_info){
                                     .info = &screen->info,
                                     .shader_type = stage,
                                     .wave_size = wave_size,
                                     .num_parts = 1,
                                     .elf_ptrs = &binary->elf_buffer,
                                     .elf_sizes = &binary->elf_size}))
      return;

   const char *disasm;
   size_t nbytes;
   /* Silently skip binaries without a disassembly section. */
   if (!ac_rtld_get_section_by_name(rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
      return;

   /* The section is one big text blob; each instruction line contains a ';'
    * (separating mnemonic text from the encoded dwords) and ends with '\n'. */
   const char *end = disasm + nbytes;
   while (disasm < end) {
      const char *semicolon = memchr(disasm, ';', end - disasm);
      if (!semicolon)
         break;

      struct si_shader_inst *inst = &instructions[(*num)++];
      const char *inst_end = memchr(semicolon + 1, '\n', end - semicolon - 1);
      if (!inst_end)
         inst_end = end;   /* last line may lack a trailing newline */

      /* Text spans the whole line (including any label before it). */
      inst->text = disasm;
      inst->textlen = inst_end - disasm;

      inst->addr = *addr;
      /* More than 16 chars after ";" means the instruction is 8 bytes long. */
      inst->size = inst_end - semicolon > 16 ? 8 : 4;
      *addr += inst->size;

      if (inst_end == end)
         break;
      disasm = inst_end + 1;   /* skip the '\n' */
   }
}
+
/* If the shader is being executed, print its asm instructions, and annotate
 * those that are being executed right now with information about waves that
 * execute them. This is most useful during a GPU hang.
 *
 * \param waves      array of wave infos sorted by PC (ascending)
 * \param num_waves  number of entries in \p waves
 */
static void si_print_annotated_shader(struct si_shader *shader, struct ac_wave_info *waves,
                                      unsigned num_waves, FILE *f)
{
   if (!shader)
      return;

   struct si_screen *screen = shader->selector->screen;
   gl_shader_stage stage = shader->selector->info.stage;
   uint64_t start_addr = shader->bo->gpu_address;
   uint64_t end_addr = start_addr + shader->bo->b.b.width0;
   unsigned i;

   /* See if any wave executes the shader. */
   for (i = 0; i < num_waves; i++) {
      if (start_addr <= waves[i].pc && waves[i].pc <= end_addr)
         break;
   }
   if (i == num_waves)
      return; /* the shader is not being executed */

   /* Remember the first found wave. The waves are sorted according to PC. */
   waves = &waves[i];
   num_waves -= i;

   /* Get the list of instructions.
    * Buffer size / 4 is the upper bound of the instruction count.
    */
   unsigned num_inst = 0;
   uint64_t inst_addr = start_addr;
   unsigned wave_size = si_get_shader_wave_size(shader);
   /* One rtld binary per possible shader part; all closed at the end. */
   struct ac_rtld_binary rtld_binaries[5] = {};
   struct si_shader_inst *instructions =
      calloc(shader->bo->b.b.width0 / 4, sizeof(struct si_shader_inst));

   /* Disassemble the parts in upload order so inst_addr tracks the
    * actual instruction addresses. */
   if (shader->prolog) {
      si_add_split_disasm(screen, &rtld_binaries[0], &shader->prolog->binary, &inst_addr, &num_inst,
                          instructions, stage, wave_size);
   }
   if (shader->previous_stage) {
      si_add_split_disasm(screen, &rtld_binaries[1], &shader->previous_stage->binary, &inst_addr,
                          &num_inst, instructions, stage, wave_size);
   }
   if (shader->prolog2) {
      si_add_split_disasm(screen, &rtld_binaries[2], &shader->prolog2->binary, &inst_addr,
                          &num_inst, instructions, stage, wave_size);
   }
   si_add_split_disasm(screen, &rtld_binaries[3], &shader->binary, &inst_addr, &num_inst,
                       instructions, stage, wave_size);
   if (shader->epilog) {
      si_add_split_disasm(screen, &rtld_binaries[4], &shader->epilog->binary, &inst_addr, &num_inst,
                          instructions, stage, wave_size);
   }

   fprintf(f, COLOR_YELLOW "%s - annotated disassembly:" COLOR_RESET "\n",
           si_get_shader_name(shader));

   /* Print instructions with annotations. */
   for (i = 0; i < num_inst; i++) {
      struct si_shader_inst *inst = &instructions[i];

      fprintf(f, "%.*s [PC=0x%" PRIx64 ", size=%u]\n", inst->textlen, inst->text, inst->addr,
              inst->size);

      /* Print which waves execute the instruction right now.
       * Both lists are sorted by PC, so this is a single merge pass. */
      while (num_waves && inst->addr == waves->pc) {
         fprintf(f,
                 " " COLOR_GREEN "^ SE%u SH%u CU%u "
                 "SIMD%u WAVE%u EXEC=%016" PRIx64 " ",
                 waves->se, waves->sh, waves->cu, waves->simd, waves->wave, waves->exec);

         if (inst->size == 4) {
            fprintf(f, "INST32=%08X" COLOR_RESET "\n", waves->inst_dw0);
         } else {
            fprintf(f, "INST64=%08X %08X" COLOR_RESET "\n", waves->inst_dw0, waves->inst_dw1);
         }

         /* Mark the wave as claimed so the caller can report orphans. */
         waves->matched = true;
         waves = &waves[1];
         num_waves--;
      }
   }

   fprintf(f, "\n\n");
   free(instructions);
   for (unsigned i = 0; i < ARRAY_SIZE(rtld_binaries); ++i)
      ac_rtld_close(&rtld_binaries[i]);
}
+
+static void si_dump_annotated_shaders(struct si_context *sctx, FILE *f)
+{
+ struct ac_wave_info waves[AC_MAX_WAVES_PER_CHIP];
+ unsigned num_waves = ac_get_wave_info(sctx->chip_class, waves);
+
+ fprintf(f, COLOR_CYAN "The number of active waves = %u" COLOR_RESET "\n\n", num_waves);
+
+ si_print_annotated_shader(sctx->vs_shader.current, waves, num_waves, f);
+ si_print_annotated_shader(sctx->tcs_shader.current, waves, num_waves, f);
+ si_print_annotated_shader(sctx->tes_shader.current, waves, num_waves, f);
+ si_print_annotated_shader(sctx->gs_shader.current, waves, num_waves, f);
+ si_print_annotated_shader(sctx->ps_shader.current, waves, num_waves, f);
+
+ /* Print waves executing shaders that are not currently bound. */
+ unsigned i;
+ bool found = false;
+ for (i = 0; i < num_waves; i++) {
+ if (waves[i].matched)
+ continue;
+
+ if (!found) {
+ fprintf(f, COLOR_CYAN "Waves not executing currently-bound shaders:" COLOR_RESET "\n");
+ found = true;
+ }
+ fprintf(f,
+ " SE%u SH%u CU%u SIMD%u WAVE%u EXEC=%016" PRIx64 " INST=%08X %08X PC=%" PRIx64
+ "\n",
+ waves[i].se, waves[i].sh, waves[i].cu, waves[i].simd, waves[i].wave, waves[i].exec,
+ waves[i].inst_dw0, waves[i].inst_dw1, waves[i].pc);
+ }
+ if (found)
+ fprintf(f, "\n\n");
+}
+
+static void si_dump_command(const char *title, const char *command, FILE *f)
+{
+ char line[2000];
+
+ FILE *p = popen(command, "r");
+ if (!p)
+ return;
+
+ fprintf(f, COLOR_YELLOW "%s: " COLOR_RESET "\n", title);
+ while (fgets(line, sizeof(line), p))
+ fputs(line, f);
+ fprintf(f, "\n\n");
+ pclose(p);
+}
+
+static void si_dump_debug_state(struct pipe_context *ctx, FILE *f, unsigned flags)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+
+ if (sctx->log)
+ u_log_flush(sctx->log);
+
+ if (flags & PIPE_DUMP_DEVICE_STATUS_REGISTERS) {
+ si_dump_debug_registers(sctx, f);
+
+ si_dump_annotated_shaders(sctx, f);
+ si_dump_command("Active waves (raw data)", "umr -O halt_waves -wa | column -t", f);
+ si_dump_command("Wave information", "umr -O halt_waves,bits -wa", f);
+ }
+}
+
+void si_log_draw_state(struct si_context *sctx, struct u_log_context *log)
+{
+ struct si_shader_ctx_state *tcs_shader;
+
+ if (!log)
+ return;
+
+ tcs_shader = &sctx->tcs_shader;
+ if (sctx->tes_shader.cso && !sctx->tcs_shader.cso)
+ tcs_shader = &sctx->fixed_func_tcs_shader;
+
+ si_dump_framebuffer(sctx, log);
+
+ si_dump_gfx_shader(sctx, &sctx->vs_shader, log);
+ si_dump_gfx_shader(sctx, tcs_shader, log);
+ si_dump_gfx_shader(sctx, &sctx->tes_shader, log);
+ si_dump_gfx_shader(sctx, &sctx->gs_shader, log);
+ si_dump_gfx_shader(sctx, &sctx->ps_shader, log);
+
+ si_dump_descriptor_list(sctx->screen, &sctx->descriptors[SI_DESCS_RW_BUFFERS], "", "RW buffers",
+ 4, sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots, si_identity,
+ log);
+ si_dump_gfx_descriptors(sctx, &sctx->vs_shader, log);
+ si_dump_gfx_descriptors(sctx, tcs_shader, log);
+ si_dump_gfx_descriptors(sctx, &sctx->tes_shader, log);
+ si_dump_gfx_descriptors(sctx, &sctx->gs_shader, log);
+ si_dump_gfx_descriptors(sctx, &sctx->ps_shader, log);
+}
+
/* Log the complete compute state: the bound compute shader and its
 * descriptor lists. No-op when logging is disabled. */
void si_log_compute_state(struct si_context *sctx, struct u_log_context *log)
{
   if (log) {
      si_dump_compute_shader(sctx, log);
      si_dump_compute_descriptors(sctx, log);
   }
}
+
+static void si_dump_dma(struct si_context *sctx, struct radeon_saved_cs *saved, FILE *f)
+{
+ static const char ib_name[] = "sDMA IB";
+ unsigned i;
+
+ si_dump_bo_list(sctx, saved, f);
+
+ fprintf(f, "------------------ %s begin ------------------\n", ib_name);
+
+ for (i = 0; i < saved->num_dw; ++i) {
+ fprintf(f, " %08x\n", saved->ib[i]);
+ }
+
+ fprintf(f, "------------------- %s end -------------------\n", ib_name);
+ fprintf(f, "\n");
+
+ fprintf(f, "SDMA Dump Done.\n");
+}
+
+void si_check_vm_faults(struct si_context *sctx, struct radeon_saved_cs *saved, enum ring_type ring)