1 /**************************************************************************
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "os/os_time.h"
43 dd_get_file_stream(struct dd_screen
*dscreen
, unsigned apitrace_call_number
)
45 struct pipe_screen
*screen
= dscreen
->screen
;
46 FILE *f
= dd_get_debug_file(dscreen
->verbose
);
50 fprintf(f
, "Driver vendor: %s\n", screen
->get_vendor(screen
));
51 fprintf(f
, "Device vendor: %s\n", screen
->get_device_vendor(screen
));
52 fprintf(f
, "Device name: %s\n\n", screen
->get_name(screen
));
54 if (apitrace_call_number
)
55 fprintf(f
, "Last apitrace call: %u\n\n",
56 apitrace_call_number
);
/* Append the last 60 lines of the kernel log to f (best effort; silently
 * does nothing if dmesg cannot be spawned). POSIX-only: uses popen. */
static void
dd_dump_dmesg(FILE *f)
{
   char line[2000];
   FILE *p = popen("dmesg | tail -n60", "r");

   if (!p)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(line, sizeof(line), p))
      fputs(line, f);

   pclose(p);
}
/* Close a stream obtained from dd_get_file_stream. */
static void
dd_close_file_stream(FILE *f)
{
   fclose(f);
}
83 dd_num_active_viewports(struct dd_draw_state
*dstate
)
85 struct tgsi_shader_info info
;
86 const struct tgsi_token
*tokens
;
88 if (dstate
->shaders
[PIPE_SHADER_GEOMETRY
])
89 tokens
= dstate
->shaders
[PIPE_SHADER_GEOMETRY
]->state
.shader
.tokens
;
90 else if (dstate
->shaders
[PIPE_SHADER_TESS_EVAL
])
91 tokens
= dstate
->shaders
[PIPE_SHADER_TESS_EVAL
]->state
.shader
.tokens
;
92 else if (dstate
->shaders
[PIPE_SHADER_VERTEX
])
93 tokens
= dstate
->shaders
[PIPE_SHADER_VERTEX
]->state
.shader
.tokens
;
97 tgsi_scan_shader(tokens
, &info
);
98 return info
.writes_viewport_index
? PIPE_MAX_VIEWPORTS
: 1;
/* ANSI color codes used to make the state dump readable in a terminal. */
#define COLOR_RESET  "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE  "\033[1;33m"

/* Dump a state object with a colored "name:" header. All macros assume a
 * local FILE *f and a matching util_dump_<name>() printer. */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Same as DUMP, but with an index (for array-like state). */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a member of a struct, by value. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a member of a struct, by address. */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)
130 print_named_value(FILE *f
, const char *name
, int value
)
132 fprintf(f
, COLOR_STATE
"%s" COLOR_RESET
" = %i\n", name
, value
);
136 print_named_xvalue(FILE *f
, const char *name
, int value
)
138 fprintf(f
, COLOR_STATE
"%s" COLOR_RESET
" = 0x%08x\n", name
, value
);
/* Print an unsigned integer in decimal (printer for DUMP_M(uint, ...)).
 * NOTE(review): the body line was lost in extraction; "%u" matches the
 * sibling util_dump_hex printer's style. */
static void
util_dump_uint(FILE *f, unsigned i)
{
   fprintf(f, "%u", i);
}
/* Print an unsigned integer as 0x-prefixed hex (printer for DUMP_M(hex, ...)). */
static void
util_dump_hex(FILE *f, unsigned i)
{
   fprintf(f, "0x%x", i);
}
/* Print a double (printer for DUMP_M(double, ...)).
 * NOTE(review): the body line was lost in extraction; "%f" is assumed —
 * confirm against upstream. */
static void
util_dump_double(FILE *f, double d)
{
   fprintf(f, "%f", d);
}
160 util_dump_format(FILE *f
, enum pipe_format format
)
162 fprintf(f
, "%s", util_format_name(format
));
166 util_dump_color_union(FILE *f
, const union pipe_color_union
*color
)
168 fprintf(f
, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
169 color
->f
[0], color
->f
[1], color
->f
[2], color
->f
[3],
170 color
->ui
[0], color
->ui
[1], color
->ui
[2], color
->ui
[3]);
174 util_dump_query(FILE *f
, struct dd_query
*query
)
176 if (query
->type
>= PIPE_QUERY_DRIVER_SPECIFIC
)
177 fprintf(f
, "PIPE_QUERY_DRIVER_SPECIFIC + %i",
178 query
->type
- PIPE_QUERY_DRIVER_SPECIFIC
);
180 fprintf(f
, "%s", util_dump_query_type(query
->type
, false));
184 dd_dump_render_condition(struct dd_draw_state
*dstate
, FILE *f
)
186 if (dstate
->render_cond
.query
) {
187 fprintf(f
, "render condition:\n");
188 DUMP_M(query
, &dstate
->render_cond
, query
);
189 DUMP_M(uint
, &dstate
->render_cond
, condition
);
190 DUMP_M(uint
, &dstate
->render_cond
, mode
);
196 dd_dump_draw_vbo(struct dd_draw_state
*dstate
, struct pipe_draw_info
*info
, FILE *f
)
199 const char *shader_str
[PIPE_SHADER_TYPES
];
201 shader_str
[PIPE_SHADER_VERTEX
] = "VERTEX";
202 shader_str
[PIPE_SHADER_TESS_CTRL
] = "TESS_CTRL";
203 shader_str
[PIPE_SHADER_TESS_EVAL
] = "TESS_EVAL";
204 shader_str
[PIPE_SHADER_GEOMETRY
] = "GEOMETRY";
205 shader_str
[PIPE_SHADER_FRAGMENT
] = "FRAGMENT";
206 shader_str
[PIPE_SHADER_COMPUTE
] = "COMPUTE";
208 DUMP(draw_info
, info
);
210 DUMP(index_buffer
, &dstate
->index_buffer
);
211 if (dstate
->index_buffer
.buffer
)
212 DUMP_M(resource
, &dstate
->index_buffer
, buffer
);
214 if (info
->count_from_stream_output
)
215 DUMP_M(stream_output_target
, info
,
216 count_from_stream_output
);
218 DUMP_M(resource
, info
, indirect
);
221 /* TODO: dump active queries */
223 dd_dump_render_condition(dstate
, f
);
225 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; i
++)
226 if (dstate
->vertex_buffers
[i
].buffer
||
227 dstate
->vertex_buffers
[i
].user_buffer
) {
228 DUMP_I(vertex_buffer
, &dstate
->vertex_buffers
[i
], i
);
229 if (dstate
->vertex_buffers
[i
].buffer
)
230 DUMP_M(resource
, &dstate
->vertex_buffers
[i
], buffer
);
233 if (dstate
->velems
) {
234 print_named_value(f
, "num vertex elements",
235 dstate
->velems
->state
.velems
.count
);
236 for (i
= 0; i
< dstate
->velems
->state
.velems
.count
; i
++) {
238 DUMP_I(vertex_element
, &dstate
->velems
->state
.velems
.velems
[i
], i
);
242 print_named_value(f
, "num stream output targets", dstate
->num_so_targets
);
243 for (i
= 0; i
< dstate
->num_so_targets
; i
++)
244 if (dstate
->so_targets
[i
]) {
245 DUMP_I(stream_output_target
, dstate
->so_targets
[i
], i
);
246 DUMP_M(resource
, dstate
->so_targets
[i
], buffer
);
247 fprintf(f
, " offset = %i\n", dstate
->so_offsets
[i
]);
251 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
252 if (sh
== PIPE_SHADER_COMPUTE
)
255 if (sh
== PIPE_SHADER_TESS_CTRL
&&
256 !dstate
->shaders
[PIPE_SHADER_TESS_CTRL
] &&
257 dstate
->shaders
[PIPE_SHADER_TESS_EVAL
])
258 fprintf(f
, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
259 "default_inner_level = {%f, %f}}\n",
260 dstate
->tess_default_levels
[0],
261 dstate
->tess_default_levels
[1],
262 dstate
->tess_default_levels
[2],
263 dstate
->tess_default_levels
[3],
264 dstate
->tess_default_levels
[4],
265 dstate
->tess_default_levels
[5]);
267 if (sh
== PIPE_SHADER_FRAGMENT
)
269 unsigned num_viewports
= dd_num_active_viewports(dstate
);
271 if (dstate
->rs
->state
.rs
.clip_plane_enable
)
272 DUMP(clip_state
, &dstate
->clip_state
);
274 for (i
= 0; i
< num_viewports
; i
++)
275 DUMP_I(viewport_state
, &dstate
->viewports
[i
], i
);
277 if (dstate
->rs
->state
.rs
.scissor
)
278 for (i
= 0; i
< num_viewports
; i
++)
279 DUMP_I(scissor_state
, &dstate
->scissors
[i
], i
);
281 DUMP(rasterizer_state
, &dstate
->rs
->state
.rs
);
283 if (dstate
->rs
->state
.rs
.poly_stipple_enable
)
284 DUMP(poly_stipple
, &dstate
->polygon_stipple
);
288 if (!dstate
->shaders
[sh
])
291 fprintf(f
, COLOR_SHADER
"begin shader: %s" COLOR_RESET
"\n", shader_str
[sh
]);
292 DUMP(shader_state
, &dstate
->shaders
[sh
]->state
.shader
);
294 for (i
= 0; i
< PIPE_MAX_CONSTANT_BUFFERS
; i
++)
295 if (dstate
->constant_buffers
[sh
][i
].buffer
||
296 dstate
->constant_buffers
[sh
][i
].user_buffer
) {
297 DUMP_I(constant_buffer
, &dstate
->constant_buffers
[sh
][i
], i
);
298 if (dstate
->constant_buffers
[sh
][i
].buffer
)
299 DUMP_M(resource
, &dstate
->constant_buffers
[sh
][i
], buffer
);
302 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++)
303 if (dstate
->sampler_states
[sh
][i
])
304 DUMP_I(sampler_state
, &dstate
->sampler_states
[sh
][i
]->state
.sampler
, i
);
306 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++)
307 if (dstate
->sampler_views
[sh
][i
]) {
308 DUMP_I(sampler_view
, dstate
->sampler_views
[sh
][i
], i
);
309 DUMP_M(resource
, dstate
->sampler_views
[sh
][i
], texture
);
312 /* TODO: print shader images */
313 /* TODO: print shader buffers */
315 fprintf(f
, COLOR_SHADER
"end shader: %s" COLOR_RESET
"\n\n", shader_str
[sh
]);
319 DUMP(depth_stencil_alpha_state
, &dstate
->dsa
->state
.dsa
);
320 DUMP(stencil_ref
, &dstate
->stencil_ref
);
323 DUMP(blend_state
, &dstate
->blend
->state
.blend
);
324 DUMP(blend_color
, &dstate
->blend_color
);
326 print_named_value(f
, "min_samples", dstate
->min_samples
);
327 print_named_xvalue(f
, "sample_mask", dstate
->sample_mask
);
330 DUMP(framebuffer_state
, &dstate
->framebuffer_state
);
331 for (i
= 0; i
< dstate
->framebuffer_state
.nr_cbufs
; i
++)
332 if (dstate
->framebuffer_state
.cbufs
[i
]) {
333 fprintf(f
, " " COLOR_STATE
"cbufs[%i]:" COLOR_RESET
"\n ", i
);
334 DUMP(surface
, dstate
->framebuffer_state
.cbufs
[i
]);
336 DUMP(resource
, dstate
->framebuffer_state
.cbufs
[i
]->texture
);
338 if (dstate
->framebuffer_state
.zsbuf
) {
339 fprintf(f
, " " COLOR_STATE
"zsbuf:" COLOR_RESET
"\n ");
340 DUMP(surface
, dstate
->framebuffer_state
.zsbuf
);
342 DUMP(resource
, dstate
->framebuffer_state
.zsbuf
->texture
);
/* Dump a compute dispatch. "__func__+8" strips the "dd_dump_" prefix. */
static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info,
                    FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO: NOTE(review): the rest of the body was lost in extraction. */
}
355 dd_dump_resource_copy_region(struct dd_draw_state
*dstate
,
356 struct call_resource_copy_region
*info
,
359 fprintf(f
, "%s:\n", __func__
+8);
360 DUMP_M(resource
, info
, dst
);
361 DUMP_M(uint
, info
, dst_level
);
362 DUMP_M(uint
, info
, dstx
);
363 DUMP_M(uint
, info
, dsty
);
364 DUMP_M(uint
, info
, dstz
);
365 DUMP_M(resource
, info
, src
);
366 DUMP_M(uint
, info
, src_level
);
367 DUMP_M_ADDR(box
, info
, src_box
);
371 dd_dump_blit(struct dd_draw_state
*dstate
, struct pipe_blit_info
*info
, FILE *f
)
373 fprintf(f
, "%s:\n", __func__
+8);
374 DUMP_M(resource
, info
, dst
.resource
);
375 DUMP_M(uint
, info
, dst
.level
);
376 DUMP_M_ADDR(box
, info
, dst
.box
);
377 DUMP_M(format
, info
, dst
.format
);
379 DUMP_M(resource
, info
, src
.resource
);
380 DUMP_M(uint
, info
, src
.level
);
381 DUMP_M_ADDR(box
, info
, src
.box
);
382 DUMP_M(format
, info
, src
.format
);
384 DUMP_M(hex
, info
, mask
);
385 DUMP_M(uint
, info
, filter
);
386 DUMP_M(uint
, info
, scissor_enable
);
387 DUMP_M_ADDR(scissor_state
, info
, scissor
);
388 DUMP_M(uint
, info
, render_condition_enable
);
390 if (info
->render_condition_enable
)
391 dd_dump_render_condition(dstate
, f
);
/* Dump a generate_mipmap call (arguments not yet printed). */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
402 dd_dump_flush_resource(struct dd_draw_state
*dstate
, struct pipe_resource
*res
,
405 fprintf(f
, "%s:\n", __func__
+8);
410 dd_dump_clear(struct dd_draw_state
*dstate
, struct call_clear
*info
, FILE *f
)
412 fprintf(f
, "%s:\n", __func__
+8);
413 DUMP_M(uint
, info
, buffers
);
414 DUMP_M_ADDR(color_union
, info
, color
);
415 DUMP_M(double, info
, depth
);
416 DUMP_M(hex
, info
, stencil
);
420 dd_dump_clear_buffer(struct dd_draw_state
*dstate
, struct call_clear_buffer
*info
,
424 const char *value
= (const char*)info
->clear_value
;
426 fprintf(f
, "%s:\n", __func__
+8);
427 DUMP_M(resource
, info
, res
);
428 DUMP_M(uint
, info
, offset
);
429 DUMP_M(uint
, info
, size
);
430 DUMP_M(uint
, info
, clear_value_size
);
432 fprintf(f
, " clear_value:");
433 for (i
= 0; i
< info
->clear_value_size
; i
++)
434 fprintf(f
, " %02x", value
[i
]);
/* Dump a clear_render_target call (arguments not yet printed). */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
/* Dump a clear_depth_stencil call (arguments not yet printed). */
static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
453 dd_dump_driver_state(struct dd_context
*dctx
, FILE *f
, unsigned flags
)
455 if (dctx
->pipe
->dump_debug_state
) {
456 fprintf(f
,"\n\n**************************************************"
457 "***************************\n");
458 fprintf(f
, "Driver-specific state:\n\n");
459 dctx
->pipe
->dump_debug_state(dctx
->pipe
, f
, flags
);
464 dd_dump_call(FILE *f
, struct dd_draw_state
*state
, struct dd_call
*call
)
466 switch (call
->type
) {
468 dd_dump_draw_vbo(state
, &call
->info
.draw_vbo
, f
);
470 case CALL_LAUNCH_GRID
:
471 dd_dump_launch_grid(state
, &call
->info
.launch_grid
, f
);
473 case CALL_RESOURCE_COPY_REGION
:
474 dd_dump_resource_copy_region(state
,
475 &call
->info
.resource_copy_region
, f
);
478 dd_dump_blit(state
, &call
->info
.blit
, f
);
480 case CALL_FLUSH_RESOURCE
:
481 dd_dump_flush_resource(state
, call
->info
.flush_resource
, f
);
484 dd_dump_clear(state
, &call
->info
.clear
, f
);
486 case CALL_CLEAR_BUFFER
:
487 dd_dump_clear_buffer(state
, &call
->info
.clear_buffer
, f
);
489 case CALL_CLEAR_RENDER_TARGET
:
490 dd_dump_clear_render_target(state
, f
);
492 case CALL_CLEAR_DEPTH_STENCIL
:
493 dd_dump_clear_depth_stencil(state
, f
);
495 case CALL_GENERATE_MIPMAP
:
496 dd_dump_generate_mipmap(state
, f
);
502 dd_write_report(struct dd_context
*dctx
, struct dd_call
*call
, unsigned flags
,
505 FILE *f
= dd_get_file_stream(dd_screen(dctx
->base
.screen
),
506 dctx
->draw_state
.apitrace_call_number
);
511 dd_dump_call(f
, &dctx
->draw_state
, call
);
512 dd_dump_driver_state(dctx
, f
, flags
);
515 dd_close_file_stream(f
);
/* Flush everything to disk and abort the process — called after a GPU hang
 * so future submissions cannot make the machine unusable. */
static void
dd_kill_process(void)
{
   sync();
   fprintf(stderr, "dd: Aborting the process...\n");
   fflush(stdout);
   fflush(stderr);
   exit(1);
}
529 dd_flush_and_check_hang(struct dd_context
*dctx
,
530 struct pipe_fence_handle
**flush_fence
,
531 unsigned flush_flags
)
533 struct pipe_fence_handle
*fence
= NULL
;
534 struct pipe_context
*pipe
= dctx
->pipe
;
535 struct pipe_screen
*screen
= pipe
->screen
;
536 uint64_t timeout_ms
= dd_screen(dctx
->base
.screen
)->timeout_ms
;
539 assert(timeout_ms
> 0);
541 pipe
->flush(pipe
, &fence
, flush_flags
);
543 screen
->fence_reference(screen
, flush_fence
, fence
);
547 idle
= screen
->fence_finish(screen
, fence
, timeout_ms
* 1000000);
548 screen
->fence_reference(screen
, &fence
, NULL
);
550 fprintf(stderr
, "dd: GPU hang detected!\n");
555 dd_flush_and_handle_hang(struct dd_context
*dctx
,
556 struct pipe_fence_handle
**fence
, unsigned flags
,
559 if (dd_flush_and_check_hang(dctx
, fence
, flags
)) {
560 FILE *f
= dd_get_file_stream(dd_screen(dctx
->base
.screen
),
561 dctx
->draw_state
.apitrace_call_number
);
564 fprintf(f
, "dd: %s.\n", cause
);
565 dd_dump_driver_state(dctx
, f
,
566 PIPE_DUMP_DEVICE_STATUS_REGISTERS
|
567 PIPE_DUMP_CURRENT_STATES
|
568 PIPE_DUMP_CURRENT_SHADERS
|
569 PIPE_DUMP_LAST_COMMAND_BUFFER
);
571 dd_close_file_stream(f
);
574 /* Terminate the process to prevent future hangs. */
580 dd_unreference_copy_of_call(struct dd_call
*dst
)
584 pipe_so_target_reference(&dst
->info
.draw_vbo
.count_from_stream_output
, NULL
);
585 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
, NULL
);
586 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect_params
, NULL
);
588 case CALL_LAUNCH_GRID
:
589 pipe_resource_reference(&dst
->info
.launch_grid
.indirect
, NULL
);
591 case CALL_RESOURCE_COPY_REGION
:
592 pipe_resource_reference(&dst
->info
.resource_copy_region
.dst
, NULL
);
593 pipe_resource_reference(&dst
->info
.resource_copy_region
.src
, NULL
);
596 pipe_resource_reference(&dst
->info
.blit
.dst
.resource
, NULL
);
597 pipe_resource_reference(&dst
->info
.blit
.src
.resource
, NULL
);
599 case CALL_FLUSH_RESOURCE
:
600 pipe_resource_reference(&dst
->info
.flush_resource
, NULL
);
604 case CALL_CLEAR_BUFFER
:
605 pipe_resource_reference(&dst
->info
.clear_buffer
.res
, NULL
);
607 case CALL_CLEAR_RENDER_TARGET
:
609 case CALL_CLEAR_DEPTH_STENCIL
:
611 case CALL_GENERATE_MIPMAP
:
612 pipe_resource_reference(&dst
->info
.generate_mipmap
.res
, NULL
);
618 dd_copy_call(struct dd_call
*dst
, struct dd_call
*src
)
620 dst
->type
= src
->type
;
624 pipe_so_target_reference(&dst
->info
.draw_vbo
.count_from_stream_output
,
625 src
->info
.draw_vbo
.count_from_stream_output
);
626 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
,
627 src
->info
.draw_vbo
.indirect
);
628 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect_params
,
629 src
->info
.draw_vbo
.indirect_params
);
630 dst
->info
.draw_vbo
= src
->info
.draw_vbo
;
632 case CALL_LAUNCH_GRID
:
633 pipe_resource_reference(&dst
->info
.launch_grid
.indirect
,
634 src
->info
.launch_grid
.indirect
);
635 dst
->info
.launch_grid
= src
->info
.launch_grid
;
637 case CALL_RESOURCE_COPY_REGION
:
638 pipe_resource_reference(&dst
->info
.resource_copy_region
.dst
,
639 src
->info
.resource_copy_region
.dst
);
640 pipe_resource_reference(&dst
->info
.resource_copy_region
.src
,
641 src
->info
.resource_copy_region
.src
);
642 dst
->info
.resource_copy_region
= src
->info
.resource_copy_region
;
645 pipe_resource_reference(&dst
->info
.blit
.dst
.resource
,
646 src
->info
.blit
.dst
.resource
);
647 pipe_resource_reference(&dst
->info
.blit
.src
.resource
,
648 src
->info
.blit
.src
.resource
);
649 dst
->info
.blit
= src
->info
.blit
;
651 case CALL_FLUSH_RESOURCE
:
652 pipe_resource_reference(&dst
->info
.flush_resource
,
653 src
->info
.flush_resource
);
656 dst
->info
.clear
= src
->info
.clear
;
658 case CALL_CLEAR_BUFFER
:
659 pipe_resource_reference(&dst
->info
.clear_buffer
.res
,
660 src
->info
.clear_buffer
.res
);
661 dst
->info
.clear_buffer
= src
->info
.clear_buffer
;
663 case CALL_CLEAR_RENDER_TARGET
:
665 case CALL_CLEAR_DEPTH_STENCIL
:
667 case CALL_GENERATE_MIPMAP
:
668 pipe_resource_reference(&dst
->info
.generate_mipmap
.res
,
669 src
->info
.generate_mipmap
.res
);
670 dst
->info
.generate_mipmap
= src
->info
.generate_mipmap
;
676 dd_init_copy_of_draw_state(struct dd_draw_state_copy
*state
)
680 /* Just clear pointers to gallium objects. Don't clear the whole structure,
681 * because it would kill performance with its size of 130 KB.
683 memset(&state
->base
.index_buffer
, 0,
684 sizeof(state
->base
.index_buffer
));
685 memset(state
->base
.vertex_buffers
, 0,
686 sizeof(state
->base
.vertex_buffers
));
687 memset(state
->base
.so_targets
, 0,
688 sizeof(state
->base
.so_targets
));
689 memset(state
->base
.constant_buffers
, 0,
690 sizeof(state
->base
.constant_buffers
));
691 memset(state
->base
.sampler_views
, 0,
692 sizeof(state
->base
.sampler_views
));
693 memset(state
->base
.shader_images
, 0,
694 sizeof(state
->base
.shader_images
));
695 memset(state
->base
.shader_buffers
, 0,
696 sizeof(state
->base
.shader_buffers
));
697 memset(&state
->base
.framebuffer_state
, 0,
698 sizeof(state
->base
.framebuffer_state
));
700 memset(state
->shaders
, 0, sizeof(state
->shaders
));
702 state
->base
.render_cond
.query
= &state
->render_cond
;
704 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
705 state
->base
.shaders
[i
] = &state
->shaders
[i
];
706 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++)
707 state
->base
.sampler_states
[i
][j
] = &state
->sampler_states
[i
][j
];
710 state
->base
.velems
= &state
->velems
;
711 state
->base
.rs
= &state
->rs
;
712 state
->base
.dsa
= &state
->dsa
;
713 state
->base
.blend
= &state
->blend
;
717 dd_unreference_copy_of_draw_state(struct dd_draw_state_copy
*state
)
719 struct dd_draw_state
*dst
= &state
->base
;
722 util_set_index_buffer(&dst
->index_buffer
, NULL
);
724 for (i
= 0; i
< ARRAY_SIZE(dst
->vertex_buffers
); i
++)
725 pipe_resource_reference(&dst
->vertex_buffers
[i
].buffer
, NULL
);
726 for (i
= 0; i
< ARRAY_SIZE(dst
->so_targets
); i
++)
727 pipe_so_target_reference(&dst
->so_targets
[i
], NULL
);
729 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
731 tgsi_free_tokens(dst
->shaders
[i
]->state
.shader
.tokens
);
733 for (j
= 0; j
< PIPE_MAX_CONSTANT_BUFFERS
; j
++)
734 pipe_resource_reference(&dst
->constant_buffers
[i
][j
].buffer
, NULL
);
735 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++)
736 pipe_sampler_view_reference(&dst
->sampler_views
[i
][j
], NULL
);
737 for (j
= 0; j
< PIPE_MAX_SHADER_IMAGES
; j
++)
738 pipe_resource_reference(&dst
->shader_images
[i
][j
].resource
, NULL
);
739 for (j
= 0; j
< PIPE_MAX_SHADER_BUFFERS
; j
++)
740 pipe_resource_reference(&dst
->shader_buffers
[i
][j
].buffer
, NULL
);
743 util_unreference_framebuffer_state(&dst
->framebuffer_state
);
747 dd_copy_draw_state(struct dd_draw_state
*dst
, struct dd_draw_state
*src
)
751 if (src
->render_cond
.query
) {
752 *dst
->render_cond
.query
= *src
->render_cond
.query
;
753 dst
->render_cond
.condition
= src
->render_cond
.condition
;
754 dst
->render_cond
.mode
= src
->render_cond
.mode
;
756 dst
->render_cond
.query
= NULL
;
759 util_set_index_buffer(&dst
->index_buffer
, &src
->index_buffer
);
761 for (i
= 0; i
< ARRAY_SIZE(src
->vertex_buffers
); i
++) {
762 pipe_resource_reference(&dst
->vertex_buffers
[i
].buffer
,
763 src
->vertex_buffers
[i
].buffer
);
764 memcpy(&dst
->vertex_buffers
[i
], &src
->vertex_buffers
[i
],
765 sizeof(src
->vertex_buffers
[i
]));
768 dst
->num_so_targets
= src
->num_so_targets
;
769 for (i
= 0; i
< ARRAY_SIZE(src
->so_targets
); i
++)
770 pipe_so_target_reference(&dst
->so_targets
[i
], src
->so_targets
[i
]);
771 memcpy(dst
->so_offsets
, src
->so_offsets
, sizeof(src
->so_offsets
));
773 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
774 if (!src
->shaders
[i
]) {
775 dst
->shaders
[i
] = NULL
;
779 if (src
->shaders
[i
]) {
780 dst
->shaders
[i
]->state
.shader
= src
->shaders
[i
]->state
.shader
;
781 dst
->shaders
[i
]->state
.shader
.tokens
=
782 tgsi_dup_tokens(src
->shaders
[i
]->state
.shader
.tokens
);
784 dst
->shaders
[i
] = NULL
;
787 for (j
= 0; j
< PIPE_MAX_CONSTANT_BUFFERS
; j
++) {
788 pipe_resource_reference(&dst
->constant_buffers
[i
][j
].buffer
,
789 src
->constant_buffers
[i
][j
].buffer
);
790 memcpy(&dst
->constant_buffers
[i
][j
], &src
->constant_buffers
[i
][j
],
791 sizeof(src
->constant_buffers
[i
][j
]));
794 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++) {
795 pipe_sampler_view_reference(&dst
->sampler_views
[i
][j
],
796 src
->sampler_views
[i
][j
]);
797 if (src
->sampler_states
[i
][j
])
798 dst
->sampler_states
[i
][j
]->state
.sampler
=
799 src
->sampler_states
[i
][j
]->state
.sampler
;
801 dst
->sampler_states
[i
][j
] = NULL
;
803 /* TODO: shader buffers & images */
807 dst
->velems
->state
.velems
= src
->velems
->state
.velems
;
812 dst
->rs
->state
.rs
= src
->rs
->state
.rs
;
817 dst
->dsa
->state
.dsa
= src
->dsa
->state
.dsa
;
822 dst
->blend
->state
.blend
= src
->blend
->state
.blend
;
826 dst
->blend_color
= src
->blend_color
;
827 dst
->stencil_ref
= src
->stencil_ref
;
828 dst
->sample_mask
= src
->sample_mask
;
829 dst
->min_samples
= src
->min_samples
;
830 dst
->clip_state
= src
->clip_state
;
831 util_copy_framebuffer_state(&dst
->framebuffer_state
, &src
->framebuffer_state
);
832 memcpy(dst
->scissors
, src
->scissors
, sizeof(src
->scissors
));
833 memcpy(dst
->viewports
, src
->viewports
, sizeof(src
->viewports
));
834 memcpy(dst
->tess_default_levels
, src
->tess_default_levels
,
835 sizeof(src
->tess_default_levels
));
836 dst
->apitrace_call_number
= src
->apitrace_call_number
;
840 dd_free_record(struct dd_draw_record
**record
)
842 struct dd_draw_record
*next
= (*record
)->next
;
844 dd_unreference_copy_of_call(&(*record
)->call
);
845 dd_unreference_copy_of_draw_state(&(*record
)->draw_state
);
846 FREE((*record
)->driver_state_log
);
852 dd_dump_record(struct dd_context
*dctx
, struct dd_draw_record
*record
,
853 uint32_t hw_sequence_no
, int64_t now
)
855 FILE *f
= dd_get_file_stream(dd_screen(dctx
->base
.screen
),
856 record
->draw_state
.base
.apitrace_call_number
);
860 fprintf(f
, "Draw call sequence # = %u\n", record
->sequence_no
);
861 fprintf(f
, "HW reached sequence # = %u\n", hw_sequence_no
);
862 fprintf(f
, "Elapsed time = %"PRIi64
" ms\n\n",
863 (now
- record
->timestamp
) / 1000);
865 dd_dump_call(f
, &record
->draw_state
.base
, &record
->call
);
866 fprintf(f
, "%s\n", record
->driver_state_log
);
868 dctx
->pipe
->dump_debug_state(dctx
->pipe
, f
,
869 PIPE_DUMP_DEVICE_STATUS_REGISTERS
);
874 PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect
, input
)
876 struct dd_context
*dctx
= (struct dd_context
*)input
;
877 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
879 pipe_mutex_lock(dctx
->mutex
);
881 while (!dctx
->kill_thread
) {
882 struct dd_draw_record
**record
= &dctx
->records
;
884 /* Loop over all records. */
888 /* If the fence has been signalled, release the record and all older
891 if (*dctx
->mapped_fence
>= (*record
)->sequence_no
) {
893 dd_free_record(record
);
897 /* The fence hasn't been signalled. Check the timeout. */
899 if (os_time_timeout((*record
)->timestamp
,
900 (*record
)->timestamp
+ dscreen
->timeout_ms
* 1000,
902 fprintf(stderr
, "GPU hang detected.\n");
904 /* Get the oldest unsignalled draw call. */
905 while ((*record
)->next
&&
906 *dctx
->mapped_fence
< (*record
)->next
->sequence_no
)
907 record
= &(*record
)->next
;
909 dd_dump_record(dctx
, *record
, *dctx
->mapped_fence
, now
);
913 record
= &(*record
)->next
;
916 /* Unlock and sleep before starting all over again. */
917 pipe_mutex_unlock(dctx
->mutex
);
918 os_time_sleep(10000); /* 10 ms */
919 pipe_mutex_lock(dctx
->mutex
);
922 /* Thread termination. */
923 while (dctx
->records
)
924 dd_free_record(&dctx
->records
);
926 pipe_mutex_unlock(dctx
->mutex
);
/* Capture the driver's current-shader dump into a heap-allocated string
 * the caller must free. Grows dctx->max_log_buffer_size geometrically until
 * the dump fits. On non-Linux platforms returns an empty string. */
static char *
dd_get_driver_shader_log(struct dd_context *dctx)
{
#if defined(PIPE_OS_LINUX)
   FILE *f;
   char *buf;
   int written_bytes;

   if (!dctx->max_log_buffer_size)
      dctx->max_log_buffer_size = 16 * 1024;

   /* Keep increasing the buffer size until there is enough space.
    *
    * open_memstream can resize automatically, but it's VERY SLOW.
    * fmemopen is much faster.
    */
   while (1) {
      buf = malloc(dctx->max_log_buffer_size);
      buf[0] = 0;
      f = fmemopen(buf, dctx->max_log_buffer_size, "a");
      if (!f) {
         free(buf);
         buf = NULL;
         break;
      }

      dd_dump_driver_state(dctx, f, PIPE_DUMP_CURRENT_SHADERS);
      written_bytes = ftell(f);
      fclose(f);

      /* Return if the backing buffer is large enough. */
      if (written_bytes < dctx->max_log_buffer_size - 1)
         break;

      /* Try again with a bigger buffer. */
      free(buf);
      dctx->max_log_buffer_size *= 2;
   }
   return buf;
#else
   /* Return an empty string. */
   return (char*)calloc(1, 4);
#endif
}
977 dd_pipelined_process_draw(struct dd_context
*dctx
, struct dd_call
*call
)
979 struct pipe_context
*pipe
= dctx
->pipe
;
980 struct dd_draw_record
*record
;
983 /* Make a record of the draw call. */
984 record
= MALLOC_STRUCT(dd_draw_record
);
988 /* Create the log. */
989 log
= dd_get_driver_shader_log(dctx
);
995 /* Update the fence with the GPU.
997 * radeonsi/clear_buffer waits in the command processor until shaders are
998 * idle before writing to memory. That's a necessary condition for isolating
1001 dctx
->sequence_no
++;
1002 pipe
->clear_buffer(pipe
, dctx
->fence
, 0, 4, &dctx
->sequence_no
, 4);
1004 /* Initialize the record. */
1005 record
->timestamp
= os_time_get();
1006 record
->sequence_no
= dctx
->sequence_no
;
1007 record
->driver_state_log
= log
;
1009 memset(&record
->call
, 0, sizeof(record
->call
));
1010 dd_copy_call(&record
->call
, call
);
1012 dd_init_copy_of_draw_state(&record
->draw_state
);
1013 dd_copy_draw_state(&record
->draw_state
.base
, &dctx
->draw_state
);
1015 /* Add the record to the list. */
1016 pipe_mutex_lock(dctx
->mutex
);
1017 record
->next
= dctx
->records
;
1018 dctx
->records
= record
;
1019 pipe_mutex_unlock(dctx
->mutex
);
1023 dd_context_flush(struct pipe_context
*_pipe
,
1024 struct pipe_fence_handle
**fence
, unsigned flags
)
1026 struct dd_context
*dctx
= dd_context(_pipe
);
1027 struct pipe_context
*pipe
= dctx
->pipe
;
1029 switch (dd_screen(dctx
->base
.screen
)->mode
) {
1030 case DD_DETECT_HANGS
:
1031 dd_flush_and_handle_hang(dctx
, fence
, flags
,
1032 "GPU hang detected in pipe->flush()");
1034 case DD_DETECT_HANGS_PIPELINED
: /* nothing to do here */
1035 case DD_DUMP_ALL_CALLS
:
1036 case DD_DUMP_APITRACE_CALL
:
1037 pipe
->flush(pipe
, fence
, flags
);
1045 dd_before_draw(struct dd_context
*dctx
)
1047 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1049 if (dscreen
->mode
== DD_DETECT_HANGS
&&
1050 !dscreen
->no_flush
&&
1051 dctx
->num_draw_calls
>= dscreen
->skip_count
)
1052 dd_flush_and_handle_hang(dctx
, NULL
, 0,
1053 "GPU hang most likely caused by internal "
1058 dd_after_draw(struct dd_context
*dctx
, struct dd_call
*call
)
1060 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1061 struct pipe_context
*pipe
= dctx
->pipe
;
1063 if (dctx
->num_draw_calls
>= dscreen
->skip_count
) {
1064 switch (dscreen
->mode
) {
1065 case DD_DETECT_HANGS
:
1066 if (!dscreen
->no_flush
&&
1067 dd_flush_and_check_hang(dctx
, NULL
, 0)) {
1068 dd_write_report(dctx
, call
,
1069 PIPE_DUMP_DEVICE_STATUS_REGISTERS
|
1070 PIPE_DUMP_CURRENT_STATES
|
1071 PIPE_DUMP_CURRENT_SHADERS
|
1072 PIPE_DUMP_LAST_COMMAND_BUFFER
,
1075 /* Terminate the process to prevent future hangs. */
1079 case DD_DETECT_HANGS_PIPELINED
:
1080 dd_pipelined_process_draw(dctx
, call
);
1082 case DD_DUMP_ALL_CALLS
:
1083 if (!dscreen
->no_flush
)
1084 pipe
->flush(pipe
, NULL
, 0);
1085 dd_write_report(dctx
, call
, 0, false);
1087 case DD_DUMP_APITRACE_CALL
:
1088 if (dscreen
->apitrace_dump_call
==
1089 dctx
->draw_state
.apitrace_call_number
) {
1090 dd_write_report(dctx
, call
, 0, false);
1091 /* No need to continue. */
1100 ++dctx
->num_draw_calls
;
1101 if (dscreen
->skip_count
&& dctx
->num_draw_calls
% 10000 == 0)
1102 fprintf(stderr
, "Gallium debugger reached %u draw calls.\n",
1103 dctx
->num_draw_calls
);
1107 dd_context_draw_vbo(struct pipe_context
*_pipe
,
1108 const struct pipe_draw_info
*info
)
1110 struct dd_context
*dctx
= dd_context(_pipe
);
1111 struct pipe_context
*pipe
= dctx
->pipe
;
1112 struct dd_call call
;
1114 call
.type
= CALL_DRAW_VBO
;
1115 call
.info
.draw_vbo
= *info
;
1117 dd_before_draw(dctx
);
1118 pipe
->draw_vbo(pipe
, info
);
1119 dd_after_draw(dctx
, &call
);
1123 dd_context_launch_grid(struct pipe_context
*_pipe
,
1124 const struct pipe_grid_info
*info
)
1126 struct dd_context
*dctx
= dd_context(_pipe
);
1127 struct pipe_context
*pipe
= dctx
->pipe
;
1128 struct dd_call call
;
1130 call
.type
= CALL_LAUNCH_GRID
;
1131 call
.info
.launch_grid
= *info
;
1133 dd_before_draw(dctx
);
1134 pipe
->launch_grid(pipe
, info
);
1135 dd_after_draw(dctx
, &call
);
1139 dd_context_resource_copy_region(struct pipe_context
*_pipe
,
1140 struct pipe_resource
*dst
, unsigned dst_level
,
1141 unsigned dstx
, unsigned dsty
, unsigned dstz
,
1142 struct pipe_resource
*src
, unsigned src_level
,
1143 const struct pipe_box
*src_box
)
1145 struct dd_context
*dctx
= dd_context(_pipe
);
1146 struct pipe_context
*pipe
= dctx
->pipe
;
1147 struct dd_call call
;
1149 call
.type
= CALL_RESOURCE_COPY_REGION
;
1150 call
.info
.resource_copy_region
.dst
= dst
;
1151 call
.info
.resource_copy_region
.dst_level
= dst_level
;
1152 call
.info
.resource_copy_region
.dstx
= dstx
;
1153 call
.info
.resource_copy_region
.dsty
= dsty
;
1154 call
.info
.resource_copy_region
.dstz
= dstz
;
1155 call
.info
.resource_copy_region
.src
= src
;
1156 call
.info
.resource_copy_region
.src_level
= src_level
;
1157 call
.info
.resource_copy_region
.src_box
= *src_box
;
1159 dd_before_draw(dctx
);
1160 pipe
->resource_copy_region(pipe
,
1161 dst
, dst_level
, dstx
, dsty
, dstz
,
1162 src
, src_level
, src_box
);
1163 dd_after_draw(dctx
, &call
);
1167 dd_context_blit(struct pipe_context
*_pipe
, const struct pipe_blit_info
*info
)
1169 struct dd_context
*dctx
= dd_context(_pipe
);
1170 struct pipe_context
*pipe
= dctx
->pipe
;
1171 struct dd_call call
;
1173 call
.type
= CALL_BLIT
;
1174 call
.info
.blit
= *info
;
1176 dd_before_draw(dctx
);
1177 pipe
->blit(pipe
, info
);
1178 dd_after_draw(dctx
, &call
);
1182 dd_context_generate_mipmap(struct pipe_context
*_pipe
,
1183 struct pipe_resource
*res
,
1184 enum pipe_format format
,
1185 unsigned base_level
,
1186 unsigned last_level
,
1187 unsigned first_layer
,
1188 unsigned last_layer
)
1190 struct dd_context
*dctx
= dd_context(_pipe
);
1191 struct pipe_context
*pipe
= dctx
->pipe
;
1192 struct dd_call call
;
1195 call
.type
= CALL_GENERATE_MIPMAP
;
1196 call
.info
.generate_mipmap
.res
= res
;
1197 call
.info
.generate_mipmap
.format
= format
;
1198 call
.info
.generate_mipmap
.base_level
= base_level
;
1199 call
.info
.generate_mipmap
.last_level
= last_level
;
1200 call
.info
.generate_mipmap
.first_layer
= first_layer
;
1201 call
.info
.generate_mipmap
.last_layer
= last_layer
;
1203 dd_before_draw(dctx
);
1204 result
= pipe
->generate_mipmap(pipe
, res
, format
, base_level
, last_level
,
1205 first_layer
, last_layer
);
1206 dd_after_draw(dctx
, &call
);
1211 dd_context_flush_resource(struct pipe_context
*_pipe
,
1212 struct pipe_resource
*resource
)
1214 struct dd_context
*dctx
= dd_context(_pipe
);
1215 struct pipe_context
*pipe
= dctx
->pipe
;
1216 struct dd_call call
;
1218 call
.type
= CALL_FLUSH_RESOURCE
;
1219 call
.info
.flush_resource
= resource
;
1221 dd_before_draw(dctx
);
1222 pipe
->flush_resource(pipe
, resource
);
1223 dd_after_draw(dctx
, &call
);
1227 dd_context_clear(struct pipe_context
*_pipe
, unsigned buffers
,
1228 const union pipe_color_union
*color
, double depth
,
1231 struct dd_context
*dctx
= dd_context(_pipe
);
1232 struct pipe_context
*pipe
= dctx
->pipe
;
1233 struct dd_call call
;
1235 call
.type
= CALL_CLEAR
;
1236 call
.info
.clear
.buffers
= buffers
;
1237 call
.info
.clear
.color
= *color
;
1238 call
.info
.clear
.depth
= depth
;
1239 call
.info
.clear
.stencil
= stencil
;
1241 dd_before_draw(dctx
);
1242 pipe
->clear(pipe
, buffers
, color
, depth
, stencil
);
1243 dd_after_draw(dctx
, &call
);
1247 dd_context_clear_render_target(struct pipe_context
*_pipe
,
1248 struct pipe_surface
*dst
,
1249 const union pipe_color_union
*color
,
1250 unsigned dstx
, unsigned dsty
,
1251 unsigned width
, unsigned height
)
1253 struct dd_context
*dctx
= dd_context(_pipe
);
1254 struct pipe_context
*pipe
= dctx
->pipe
;
1255 struct dd_call call
;
1257 call
.type
= CALL_CLEAR_RENDER_TARGET
;
1259 dd_before_draw(dctx
);
1260 pipe
->clear_render_target(pipe
, dst
, color
, dstx
, dsty
, width
, height
);
1261 dd_after_draw(dctx
, &call
);
1265 dd_context_clear_depth_stencil(struct pipe_context
*_pipe
,
1266 struct pipe_surface
*dst
, unsigned clear_flags
,
1267 double depth
, unsigned stencil
, unsigned dstx
,
1268 unsigned dsty
, unsigned width
, unsigned height
)
1270 struct dd_context
*dctx
= dd_context(_pipe
);
1271 struct pipe_context
*pipe
= dctx
->pipe
;
1272 struct dd_call call
;
1274 call
.type
= CALL_CLEAR_DEPTH_STENCIL
;
1276 dd_before_draw(dctx
);
1277 pipe
->clear_depth_stencil(pipe
, dst
, clear_flags
, depth
, stencil
,
1278 dstx
, dsty
, width
, height
);
1279 dd_after_draw(dctx
, &call
);
1283 dd_context_clear_buffer(struct pipe_context
*_pipe
, struct pipe_resource
*res
,
1284 unsigned offset
, unsigned size
,
1285 const void *clear_value
, int clear_value_size
)
1287 struct dd_context
*dctx
= dd_context(_pipe
);
1288 struct pipe_context
*pipe
= dctx
->pipe
;
1289 struct dd_call call
;
1291 call
.type
= CALL_CLEAR_BUFFER
;
1292 call
.info
.clear_buffer
.res
= res
;
1293 call
.info
.clear_buffer
.offset
= offset
;
1294 call
.info
.clear_buffer
.size
= size
;
1295 call
.info
.clear_buffer
.clear_value
= clear_value
;
1296 call
.info
.clear_buffer
.clear_value_size
= clear_value_size
;
1298 dd_before_draw(dctx
);
1299 pipe
->clear_buffer(pipe
, res
, offset
, size
, clear_value
, clear_value_size
);
1300 dd_after_draw(dctx
, &call
);
1304 dd_init_draw_functions(struct dd_context
*dctx
)
1308 CTX_INIT(launch_grid
);
1309 CTX_INIT(resource_copy_region
);
1312 CTX_INIT(clear_render_target
);
1313 CTX_INIT(clear_depth_stencil
);
1314 CTX_INIT(clear_buffer
);
1315 CTX_INIT(flush_resource
);
1316 CTX_INIT(generate_mipmap
);