1 /**************************************************************************
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "os/os_time.h"
43 dd_get_file_stream(struct dd_screen
*dscreen
, unsigned apitrace_call_number
)
45 struct pipe_screen
*screen
= dscreen
->screen
;
48 FILE *f
= dd_get_debug_file(dscreen
->verbose
);
52 if (os_get_command_line(cmd_line
, sizeof(cmd_line
)))
53 fprintf(f
, "Command: %s\n", cmd_line
);
54 fprintf(f
, "Driver vendor: %s\n", screen
->get_vendor(screen
));
55 fprintf(f
, "Device vendor: %s\n", screen
->get_device_vendor(screen
));
56 fprintf(f
, "Device name: %s\n\n", screen
->get_name(screen
));
58 if (apitrace_call_number
)
59 fprintf(f
, "Last apitrace call: %u\n\n",
60 apitrace_call_number
);
/**
 * Append the last 60 lines of the kernel log (dmesg) to the debug stream.
 *
 * Spawns "dmesg | tail -n60" through popen(). If the pipe cannot be
 * created (dmesg unavailable, fork failure), the function silently
 * returns without writing anything.
 */
static void
dd_dump_dmesg(FILE *f)
{
   char line[2000];
   FILE *p = popen("dmesg | tail -n60", "r");

   /* Fix: the pipe may fail to open; don't dereference a NULL FILE. */
   if (!p)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(line, sizeof(line), p))
      fputs(line, f);

   /* Fix: close the pipe to reap the child and avoid a FILE/fd leak. */
   pclose(p);
}
81 dd_close_file_stream(FILE *f
)
87 dd_num_active_viewports(struct dd_draw_state
*dstate
)
89 struct tgsi_shader_info info
;
90 const struct tgsi_token
*tokens
;
92 if (dstate
->shaders
[PIPE_SHADER_GEOMETRY
])
93 tokens
= dstate
->shaders
[PIPE_SHADER_GEOMETRY
]->state
.shader
.tokens
;
94 else if (dstate
->shaders
[PIPE_SHADER_TESS_EVAL
])
95 tokens
= dstate
->shaders
[PIPE_SHADER_TESS_EVAL
]->state
.shader
.tokens
;
96 else if (dstate
->shaders
[PIPE_SHADER_VERTEX
])
97 tokens
= dstate
->shaders
[PIPE_SHADER_VERTEX
]->state
.shader
.tokens
;
102 tgsi_scan_shader(tokens
, &info
);
103 if (info
.writes_viewport_index
)
104 return PIPE_MAX_VIEWPORTS
;
/* ANSI escape sequences used to colorize the state dump output. */
#define COLOR_RESET  "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE  "\033[1;33m"

/* Dump one named state object via the matching util_dump_<name> helper.
 * Fix: the multi-statement macros must be wrapped in do { ... } while (0)
 * and properly terminated so they behave as a single statement. */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while (0)

/* Same as DUMP, but with an index (for array-like state such as viewports). */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while (0)

/* Dump one member of a structure, by value. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while (0)

/* Dump one member of a structure, by address (for struct-typed members). */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while (0)
139 print_named_value(FILE *f
, const char *name
, int value
)
141 fprintf(f
, COLOR_STATE
"%s" COLOR_RESET
" = %i\n", name
, value
);
145 print_named_xvalue(FILE *f
, const char *name
, int value
)
147 fprintf(f
, COLOR_STATE
"%s" COLOR_RESET
" = 0x%08x\n", name
, value
);
151 util_dump_uint(FILE *f
, unsigned i
)
157 util_dump_int(FILE *f
, int i
)
/* Emit an unsigned integer in "0x…" hexadecimal form. */
static void
util_dump_hex(FILE *f, unsigned i)
{
   fprintf(f, "0x%x", i);
}
169 util_dump_double(FILE *f
, double d
)
175 util_dump_format(FILE *f
, enum pipe_format format
)
177 fprintf(f
, "%s", util_format_name(format
));
181 util_dump_color_union(FILE *f
, const union pipe_color_union
*color
)
183 fprintf(f
, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
184 color
->f
[0], color
->f
[1], color
->f
[2], color
->f
[3],
185 color
->ui
[0], color
->ui
[1], color
->ui
[2], color
->ui
[3]);
189 dd_dump_render_condition(struct dd_draw_state
*dstate
, FILE *f
)
191 if (dstate
->render_cond
.query
) {
192 fprintf(f
, "render condition:\n");
193 DUMP_M(query_type
, &dstate
->render_cond
, query
->type
);
194 DUMP_M(uint
, &dstate
->render_cond
, condition
);
195 DUMP_M(uint
, &dstate
->render_cond
, mode
);
201 dd_dump_shader(struct dd_draw_state
*dstate
, enum pipe_shader_type sh
, FILE *f
)
204 const char *shader_str
[PIPE_SHADER_TYPES
];
206 shader_str
[PIPE_SHADER_VERTEX
] = "VERTEX";
207 shader_str
[PIPE_SHADER_TESS_CTRL
] = "TESS_CTRL";
208 shader_str
[PIPE_SHADER_TESS_EVAL
] = "TESS_EVAL";
209 shader_str
[PIPE_SHADER_GEOMETRY
] = "GEOMETRY";
210 shader_str
[PIPE_SHADER_FRAGMENT
] = "FRAGMENT";
211 shader_str
[PIPE_SHADER_COMPUTE
] = "COMPUTE";
213 if (sh
== PIPE_SHADER_TESS_CTRL
&&
214 !dstate
->shaders
[PIPE_SHADER_TESS_CTRL
] &&
215 dstate
->shaders
[PIPE_SHADER_TESS_EVAL
])
216 fprintf(f
, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
217 "default_inner_level = {%f, %f}}\n",
218 dstate
->tess_default_levels
[0],
219 dstate
->tess_default_levels
[1],
220 dstate
->tess_default_levels
[2],
221 dstate
->tess_default_levels
[3],
222 dstate
->tess_default_levels
[4],
223 dstate
->tess_default_levels
[5]);
225 if (sh
== PIPE_SHADER_FRAGMENT
)
227 unsigned num_viewports
= dd_num_active_viewports(dstate
);
229 if (dstate
->rs
->state
.rs
.clip_plane_enable
)
230 DUMP(clip_state
, &dstate
->clip_state
);
232 for (i
= 0; i
< num_viewports
; i
++)
233 DUMP_I(viewport_state
, &dstate
->viewports
[i
], i
);
235 if (dstate
->rs
->state
.rs
.scissor
)
236 for (i
= 0; i
< num_viewports
; i
++)
237 DUMP_I(scissor_state
, &dstate
->scissors
[i
], i
);
239 DUMP(rasterizer_state
, &dstate
->rs
->state
.rs
);
241 if (dstate
->rs
->state
.rs
.poly_stipple_enable
)
242 DUMP(poly_stipple
, &dstate
->polygon_stipple
);
246 if (!dstate
->shaders
[sh
])
249 fprintf(f
, COLOR_SHADER
"begin shader: %s" COLOR_RESET
"\n", shader_str
[sh
]);
250 DUMP(shader_state
, &dstate
->shaders
[sh
]->state
.shader
);
252 for (i
= 0; i
< PIPE_MAX_CONSTANT_BUFFERS
; i
++)
253 if (dstate
->constant_buffers
[sh
][i
].buffer
||
254 dstate
->constant_buffers
[sh
][i
].user_buffer
) {
255 DUMP_I(constant_buffer
, &dstate
->constant_buffers
[sh
][i
], i
);
256 if (dstate
->constant_buffers
[sh
][i
].buffer
)
257 DUMP_M(resource
, &dstate
->constant_buffers
[sh
][i
], buffer
);
260 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++)
261 if (dstate
->sampler_states
[sh
][i
])
262 DUMP_I(sampler_state
, &dstate
->sampler_states
[sh
][i
]->state
.sampler
, i
);
264 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++)
265 if (dstate
->sampler_views
[sh
][i
]) {
266 DUMP_I(sampler_view
, dstate
->sampler_views
[sh
][i
], i
);
267 DUMP_M(resource
, dstate
->sampler_views
[sh
][i
], texture
);
270 for (i
= 0; i
< PIPE_MAX_SHADER_IMAGES
; i
++)
271 if (dstate
->shader_images
[sh
][i
].resource
) {
272 DUMP_I(image_view
, &dstate
->shader_images
[sh
][i
], i
);
273 if (dstate
->shader_images
[sh
][i
].resource
)
274 DUMP_M(resource
, &dstate
->shader_images
[sh
][i
], resource
);
277 for (i
= 0; i
< PIPE_MAX_SHADER_BUFFERS
; i
++)
278 if (dstate
->shader_buffers
[sh
][i
].buffer
) {
279 DUMP_I(shader_buffer
, &dstate
->shader_buffers
[sh
][i
], i
);
280 if (dstate
->shader_buffers
[sh
][i
].buffer
)
281 DUMP_M(resource
, &dstate
->shader_buffers
[sh
][i
], buffer
);
284 fprintf(f
, COLOR_SHADER
"end shader: %s" COLOR_RESET
"\n\n", shader_str
[sh
]);
288 dd_dump_draw_vbo(struct dd_draw_state
*dstate
, struct pipe_draw_info
*info
, FILE *f
)
292 DUMP(draw_info
, info
);
293 if (info
->count_from_stream_output
)
294 DUMP_M(stream_output_target
, info
,
295 count_from_stream_output
);
296 if (info
->indirect
) {
297 DUMP_M(resource
, info
, indirect
->buffer
);
298 if (info
->indirect
->indirect_draw_count
)
299 DUMP_M(resource
, info
, indirect
->indirect_draw_count
);
304 /* TODO: dump active queries */
306 dd_dump_render_condition(dstate
, f
);
308 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; i
++)
309 if (dstate
->vertex_buffers
[i
].buffer
.resource
) {
310 DUMP_I(vertex_buffer
, &dstate
->vertex_buffers
[i
], i
);
311 if (!dstate
->vertex_buffers
[i
].is_user_buffer
)
312 DUMP_M(resource
, &dstate
->vertex_buffers
[i
], buffer
.resource
);
315 if (dstate
->velems
) {
316 print_named_value(f
, "num vertex elements",
317 dstate
->velems
->state
.velems
.count
);
318 for (i
= 0; i
< dstate
->velems
->state
.velems
.count
; i
++) {
320 DUMP_I(vertex_element
, &dstate
->velems
->state
.velems
.velems
[i
], i
);
324 print_named_value(f
, "num stream output targets", dstate
->num_so_targets
);
325 for (i
= 0; i
< dstate
->num_so_targets
; i
++)
326 if (dstate
->so_targets
[i
]) {
327 DUMP_I(stream_output_target
, dstate
->so_targets
[i
], i
);
328 DUMP_M(resource
, dstate
->so_targets
[i
], buffer
);
329 fprintf(f
, " offset = %i\n", dstate
->so_offsets
[i
]);
333 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
334 if (sh
== PIPE_SHADER_COMPUTE
)
337 dd_dump_shader(dstate
, sh
, f
);
341 DUMP(depth_stencil_alpha_state
, &dstate
->dsa
->state
.dsa
);
342 DUMP(stencil_ref
, &dstate
->stencil_ref
);
345 DUMP(blend_state
, &dstate
->blend
->state
.blend
);
346 DUMP(blend_color
, &dstate
->blend_color
);
348 print_named_value(f
, "min_samples", dstate
->min_samples
);
349 print_named_xvalue(f
, "sample_mask", dstate
->sample_mask
);
352 DUMP(framebuffer_state
, &dstate
->framebuffer_state
);
353 for (i
= 0; i
< dstate
->framebuffer_state
.nr_cbufs
; i
++)
354 if (dstate
->framebuffer_state
.cbufs
[i
]) {
355 fprintf(f
, " " COLOR_STATE
"cbufs[%i]:" COLOR_RESET
"\n ", i
);
356 DUMP(surface
, dstate
->framebuffer_state
.cbufs
[i
]);
358 DUMP(resource
, dstate
->framebuffer_state
.cbufs
[i
]->texture
);
360 if (dstate
->framebuffer_state
.zsbuf
) {
361 fprintf(f
, " " COLOR_STATE
"zsbuf:" COLOR_RESET
"\n ");
362 DUMP(surface
, dstate
->framebuffer_state
.zsbuf
);
364 DUMP(resource
, dstate
->framebuffer_state
.zsbuf
->texture
);
370 dd_dump_launch_grid(struct dd_draw_state
*dstate
, struct pipe_grid_info
*info
, FILE *f
)
372 fprintf(f
, "%s:\n", __func__
+8);
373 DUMP(grid_info
, info
);
376 dd_dump_shader(dstate
, PIPE_SHADER_COMPUTE
, f
);
381 dd_dump_resource_copy_region(struct dd_draw_state
*dstate
,
382 struct call_resource_copy_region
*info
,
385 fprintf(f
, "%s:\n", __func__
+8);
386 DUMP_M(resource
, info
, dst
);
387 DUMP_M(uint
, info
, dst_level
);
388 DUMP_M(uint
, info
, dstx
);
389 DUMP_M(uint
, info
, dsty
);
390 DUMP_M(uint
, info
, dstz
);
391 DUMP_M(resource
, info
, src
);
392 DUMP_M(uint
, info
, src_level
);
393 DUMP_M_ADDR(box
, info
, src_box
);
397 dd_dump_blit(struct dd_draw_state
*dstate
, struct pipe_blit_info
*info
, FILE *f
)
399 fprintf(f
, "%s:\n", __func__
+8);
400 DUMP_M(resource
, info
, dst
.resource
);
401 DUMP_M(uint
, info
, dst
.level
);
402 DUMP_M_ADDR(box
, info
, dst
.box
);
403 DUMP_M(format
, info
, dst
.format
);
405 DUMP_M(resource
, info
, src
.resource
);
406 DUMP_M(uint
, info
, src
.level
);
407 DUMP_M_ADDR(box
, info
, src
.box
);
408 DUMP_M(format
, info
, src
.format
);
410 DUMP_M(hex
, info
, mask
);
411 DUMP_M(uint
, info
, filter
);
412 DUMP_M(uint
, info
, scissor_enable
);
413 DUMP_M_ADDR(scissor_state
, info
, scissor
);
414 DUMP_M(uint
, info
, render_condition_enable
);
416 if (info
->render_condition_enable
)
417 dd_dump_render_condition(dstate
, f
);
421 dd_dump_generate_mipmap(struct dd_draw_state
*dstate
, FILE *f
)
423 fprintf(f
, "%s:\n", __func__
+8);
428 dd_dump_get_query_result_resource(struct call_get_query_result_resource
*info
, FILE *f
)
430 fprintf(f
, "%s:\n", __func__
+ 8);
431 DUMP_M(query_type
, info
, query_type
);
432 DUMP_M(uint
, info
, wait
);
433 DUMP_M(query_value_type
, info
, result_type
);
434 DUMP_M(int, info
, index
);
435 DUMP_M(resource
, info
, resource
);
436 DUMP_M(uint
, info
, offset
);
440 dd_dump_flush_resource(struct dd_draw_state
*dstate
, struct pipe_resource
*res
,
443 fprintf(f
, "%s:\n", __func__
+8);
448 dd_dump_clear(struct dd_draw_state
*dstate
, struct call_clear
*info
, FILE *f
)
450 fprintf(f
, "%s:\n", __func__
+8);
451 DUMP_M(uint
, info
, buffers
);
452 DUMP_M_ADDR(color_union
, info
, color
);
453 DUMP_M(double, info
, depth
);
454 DUMP_M(hex
, info
, stencil
);
458 dd_dump_clear_buffer(struct dd_draw_state
*dstate
, struct call_clear_buffer
*info
,
462 const char *value
= (const char*)info
->clear_value
;
464 fprintf(f
, "%s:\n", __func__
+8);
465 DUMP_M(resource
, info
, res
);
466 DUMP_M(uint
, info
, offset
);
467 DUMP_M(uint
, info
, size
);
468 DUMP_M(uint
, info
, clear_value_size
);
470 fprintf(f
, " clear_value:");
471 for (i
= 0; i
< info
->clear_value_size
; i
++)
472 fprintf(f
, " %02x", value
[i
]);
477 dd_dump_clear_texture(struct dd_draw_state
*dstate
, FILE *f
)
479 fprintf(f
, "%s:\n", __func__
+8);
484 dd_dump_clear_render_target(struct dd_draw_state
*dstate
, FILE *f
)
486 fprintf(f
, "%s:\n", __func__
+8);
491 dd_dump_clear_depth_stencil(struct dd_draw_state
*dstate
, FILE *f
)
493 fprintf(f
, "%s:\n", __func__
+8);
498 dd_dump_driver_state(struct dd_context
*dctx
, FILE *f
, unsigned flags
)
500 if (dctx
->pipe
->dump_debug_state
) {
501 fprintf(f
,"\n\n**************************************************"
502 "***************************\n");
503 fprintf(f
, "Driver-specific state:\n\n");
504 dctx
->pipe
->dump_debug_state(dctx
->pipe
, f
, flags
);
509 dd_dump_call(FILE *f
, struct dd_draw_state
*state
, struct dd_call
*call
)
511 switch (call
->type
) {
513 dd_dump_draw_vbo(state
, &call
->info
.draw_vbo
.draw
, f
);
515 case CALL_LAUNCH_GRID
:
516 dd_dump_launch_grid(state
, &call
->info
.launch_grid
, f
);
518 case CALL_RESOURCE_COPY_REGION
:
519 dd_dump_resource_copy_region(state
,
520 &call
->info
.resource_copy_region
, f
);
523 dd_dump_blit(state
, &call
->info
.blit
, f
);
525 case CALL_FLUSH_RESOURCE
:
526 dd_dump_flush_resource(state
, call
->info
.flush_resource
, f
);
529 dd_dump_clear(state
, &call
->info
.clear
, f
);
531 case CALL_CLEAR_BUFFER
:
532 dd_dump_clear_buffer(state
, &call
->info
.clear_buffer
, f
);
534 case CALL_CLEAR_TEXTURE
:
535 dd_dump_clear_texture(state
, f
);
537 case CALL_CLEAR_RENDER_TARGET
:
538 dd_dump_clear_render_target(state
, f
);
540 case CALL_CLEAR_DEPTH_STENCIL
:
541 dd_dump_clear_depth_stencil(state
, f
);
543 case CALL_GENERATE_MIPMAP
:
544 dd_dump_generate_mipmap(state
, f
);
546 case CALL_GET_QUERY_RESULT_RESOURCE
:
547 dd_dump_get_query_result_resource(&call
->info
.get_query_result_resource
, f
);
553 dd_write_report(struct dd_context
*dctx
, struct dd_call
*call
, unsigned flags
,
556 FILE *f
= dd_get_file_stream(dd_screen(dctx
->base
.screen
),
557 dctx
->draw_state
.apitrace_call_number
);
562 dd_dump_call(f
, &dctx
->draw_state
, call
);
563 dd_dump_driver_state(dctx
, f
, flags
);
565 fprintf(f
,"\n\n**************************************************"
566 "***************************\n");
567 fprintf(f
, "Context Log:\n\n");
568 u_log_new_page_print(&dctx
->log
, f
);
572 dd_close_file_stream(f
);
576 dd_kill_process(void)
579 fprintf(stderr
, "dd: Aborting the process...\n");
586 dd_flush_and_check_hang(struct dd_context
*dctx
,
587 struct pipe_fence_handle
**flush_fence
,
588 unsigned flush_flags
)
590 struct pipe_fence_handle
*fence
= NULL
;
591 struct pipe_context
*pipe
= dctx
->pipe
;
592 struct pipe_screen
*screen
= pipe
->screen
;
593 uint64_t timeout_ms
= dd_screen(dctx
->base
.screen
)->timeout_ms
;
596 assert(timeout_ms
> 0);
598 pipe
->flush(pipe
, &fence
, flush_flags
);
600 screen
->fence_reference(screen
, flush_fence
, fence
);
604 idle
= screen
->fence_finish(screen
, pipe
, fence
, timeout_ms
* 1000000);
605 screen
->fence_reference(screen
, &fence
, NULL
);
607 fprintf(stderr
, "dd: GPU hang detected!\n");
612 dd_flush_and_handle_hang(struct dd_context
*dctx
,
613 struct pipe_fence_handle
**fence
, unsigned flags
,
616 if (dd_flush_and_check_hang(dctx
, fence
, flags
)) {
617 FILE *f
= dd_get_file_stream(dd_screen(dctx
->base
.screen
),
618 dctx
->draw_state
.apitrace_call_number
);
621 fprintf(f
, "dd: %s.\n", cause
);
622 dd_dump_driver_state(dctx
, f
,
623 PIPE_DUMP_DEVICE_STATUS_REGISTERS
|
624 PIPE_DUMP_CURRENT_STATES
|
625 PIPE_DUMP_CURRENT_SHADERS
|
626 PIPE_DUMP_LAST_COMMAND_BUFFER
);
628 dd_close_file_stream(f
);
631 /* Terminate the process to prevent future hangs. */
637 dd_unreference_copy_of_call(struct dd_call
*dst
)
641 pipe_so_target_reference(&dst
->info
.draw_vbo
.draw
.count_from_stream_output
, NULL
);
642 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
.buffer
, NULL
);
643 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
.indirect_draw_count
, NULL
);
644 if (dst
->info
.draw_vbo
.draw
.index_size
&&
645 !dst
->info
.draw_vbo
.draw
.has_user_indices
)
646 pipe_resource_reference(&dst
->info
.draw_vbo
.draw
.index
.resource
, NULL
);
648 dst
->info
.draw_vbo
.draw
.index
.user
= NULL
;
650 case CALL_LAUNCH_GRID
:
651 pipe_resource_reference(&dst
->info
.launch_grid
.indirect
, NULL
);
653 case CALL_RESOURCE_COPY_REGION
:
654 pipe_resource_reference(&dst
->info
.resource_copy_region
.dst
, NULL
);
655 pipe_resource_reference(&dst
->info
.resource_copy_region
.src
, NULL
);
658 pipe_resource_reference(&dst
->info
.blit
.dst
.resource
, NULL
);
659 pipe_resource_reference(&dst
->info
.blit
.src
.resource
, NULL
);
661 case CALL_FLUSH_RESOURCE
:
662 pipe_resource_reference(&dst
->info
.flush_resource
, NULL
);
666 case CALL_CLEAR_BUFFER
:
667 pipe_resource_reference(&dst
->info
.clear_buffer
.res
, NULL
);
669 case CALL_CLEAR_TEXTURE
:
671 case CALL_CLEAR_RENDER_TARGET
:
673 case CALL_CLEAR_DEPTH_STENCIL
:
675 case CALL_GENERATE_MIPMAP
:
676 pipe_resource_reference(&dst
->info
.generate_mipmap
.res
, NULL
);
678 case CALL_GET_QUERY_RESULT_RESOURCE
:
679 pipe_resource_reference(&dst
->info
.get_query_result_resource
.resource
, NULL
);
685 dd_copy_call(struct dd_call
*dst
, struct dd_call
*src
)
687 dst
->type
= src
->type
;
691 pipe_so_target_reference(&dst
->info
.draw_vbo
.draw
.count_from_stream_output
,
692 src
->info
.draw_vbo
.draw
.count_from_stream_output
);
693 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
.buffer
,
694 src
->info
.draw_vbo
.indirect
.buffer
);
695 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
.indirect_draw_count
,
696 src
->info
.draw_vbo
.indirect
.indirect_draw_count
);
698 if (dst
->info
.draw_vbo
.draw
.index_size
&&
699 !dst
->info
.draw_vbo
.draw
.has_user_indices
)
700 pipe_resource_reference(&dst
->info
.draw_vbo
.draw
.index
.resource
, NULL
);
702 dst
->info
.draw_vbo
.draw
.index
.user
= NULL
;
704 if (src
->info
.draw_vbo
.draw
.index_size
&&
705 !src
->info
.draw_vbo
.draw
.has_user_indices
) {
706 pipe_resource_reference(&dst
->info
.draw_vbo
.draw
.index
.resource
,
707 src
->info
.draw_vbo
.draw
.index
.resource
);
710 dst
->info
.draw_vbo
= src
->info
.draw_vbo
;
711 if (!src
->info
.draw_vbo
.draw
.indirect
)
712 dst
->info
.draw_vbo
.draw
.indirect
= NULL
;
714 dst
->info
.draw_vbo
.draw
.indirect
= &dst
->info
.draw_vbo
.indirect
;
716 case CALL_LAUNCH_GRID
:
717 pipe_resource_reference(&dst
->info
.launch_grid
.indirect
,
718 src
->info
.launch_grid
.indirect
);
719 dst
->info
.launch_grid
= src
->info
.launch_grid
;
721 case CALL_RESOURCE_COPY_REGION
:
722 pipe_resource_reference(&dst
->info
.resource_copy_region
.dst
,
723 src
->info
.resource_copy_region
.dst
);
724 pipe_resource_reference(&dst
->info
.resource_copy_region
.src
,
725 src
->info
.resource_copy_region
.src
);
726 dst
->info
.resource_copy_region
= src
->info
.resource_copy_region
;
729 pipe_resource_reference(&dst
->info
.blit
.dst
.resource
,
730 src
->info
.blit
.dst
.resource
);
731 pipe_resource_reference(&dst
->info
.blit
.src
.resource
,
732 src
->info
.blit
.src
.resource
);
733 dst
->info
.blit
= src
->info
.blit
;
735 case CALL_FLUSH_RESOURCE
:
736 pipe_resource_reference(&dst
->info
.flush_resource
,
737 src
->info
.flush_resource
);
740 dst
->info
.clear
= src
->info
.clear
;
742 case CALL_CLEAR_BUFFER
:
743 pipe_resource_reference(&dst
->info
.clear_buffer
.res
,
744 src
->info
.clear_buffer
.res
);
745 dst
->info
.clear_buffer
= src
->info
.clear_buffer
;
747 case CALL_CLEAR_TEXTURE
:
749 case CALL_CLEAR_RENDER_TARGET
:
751 case CALL_CLEAR_DEPTH_STENCIL
:
753 case CALL_GENERATE_MIPMAP
:
754 pipe_resource_reference(&dst
->info
.generate_mipmap
.res
,
755 src
->info
.generate_mipmap
.res
);
756 dst
->info
.generate_mipmap
= src
->info
.generate_mipmap
;
758 case CALL_GET_QUERY_RESULT_RESOURCE
:
759 pipe_resource_reference(&dst
->info
.get_query_result_resource
.resource
,
760 src
->info
.get_query_result_resource
.resource
);
761 dst
->info
.get_query_result_resource
= src
->info
.get_query_result_resource
;
762 dst
->info
.get_query_result_resource
.query
= NULL
;
768 dd_init_copy_of_draw_state(struct dd_draw_state_copy
*state
)
772 /* Just clear pointers to gallium objects. Don't clear the whole structure,
773 * because it would kill performance with its size of 130 KB.
775 memset(state
->base
.vertex_buffers
, 0,
776 sizeof(state
->base
.vertex_buffers
));
777 memset(state
->base
.so_targets
, 0,
778 sizeof(state
->base
.so_targets
));
779 memset(state
->base
.constant_buffers
, 0,
780 sizeof(state
->base
.constant_buffers
));
781 memset(state
->base
.sampler_views
, 0,
782 sizeof(state
->base
.sampler_views
));
783 memset(state
->base
.shader_images
, 0,
784 sizeof(state
->base
.shader_images
));
785 memset(state
->base
.shader_buffers
, 0,
786 sizeof(state
->base
.shader_buffers
));
787 memset(&state
->base
.framebuffer_state
, 0,
788 sizeof(state
->base
.framebuffer_state
));
790 memset(state
->shaders
, 0, sizeof(state
->shaders
));
792 state
->base
.render_cond
.query
= &state
->render_cond
;
794 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
795 state
->base
.shaders
[i
] = &state
->shaders
[i
];
796 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++)
797 state
->base
.sampler_states
[i
][j
] = &state
->sampler_states
[i
][j
];
800 state
->base
.velems
= &state
->velems
;
801 state
->base
.rs
= &state
->rs
;
802 state
->base
.dsa
= &state
->dsa
;
803 state
->base
.blend
= &state
->blend
;
807 dd_unreference_copy_of_draw_state(struct dd_draw_state_copy
*state
)
809 struct dd_draw_state
*dst
= &state
->base
;
812 for (i
= 0; i
< ARRAY_SIZE(dst
->vertex_buffers
); i
++)
813 pipe_vertex_buffer_unreference(&dst
->vertex_buffers
[i
]);
814 for (i
= 0; i
< ARRAY_SIZE(dst
->so_targets
); i
++)
815 pipe_so_target_reference(&dst
->so_targets
[i
], NULL
);
817 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
819 tgsi_free_tokens(dst
->shaders
[i
]->state
.shader
.tokens
);
821 for (j
= 0; j
< PIPE_MAX_CONSTANT_BUFFERS
; j
++)
822 pipe_resource_reference(&dst
->constant_buffers
[i
][j
].buffer
, NULL
);
823 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++)
824 pipe_sampler_view_reference(&dst
->sampler_views
[i
][j
], NULL
);
825 for (j
= 0; j
< PIPE_MAX_SHADER_IMAGES
; j
++)
826 pipe_resource_reference(&dst
->shader_images
[i
][j
].resource
, NULL
);
827 for (j
= 0; j
< PIPE_MAX_SHADER_BUFFERS
; j
++)
828 pipe_resource_reference(&dst
->shader_buffers
[i
][j
].buffer
, NULL
);
831 util_unreference_framebuffer_state(&dst
->framebuffer_state
);
835 dd_copy_draw_state(struct dd_draw_state
*dst
, struct dd_draw_state
*src
)
839 if (src
->render_cond
.query
) {
840 *dst
->render_cond
.query
= *src
->render_cond
.query
;
841 dst
->render_cond
.condition
= src
->render_cond
.condition
;
842 dst
->render_cond
.mode
= src
->render_cond
.mode
;
844 dst
->render_cond
.query
= NULL
;
847 for (i
= 0; i
< ARRAY_SIZE(src
->vertex_buffers
); i
++) {
848 pipe_vertex_buffer_reference(&dst
->vertex_buffers
[i
],
849 &src
->vertex_buffers
[i
]);
852 dst
->num_so_targets
= src
->num_so_targets
;
853 for (i
= 0; i
< ARRAY_SIZE(src
->so_targets
); i
++)
854 pipe_so_target_reference(&dst
->so_targets
[i
], src
->so_targets
[i
]);
855 memcpy(dst
->so_offsets
, src
->so_offsets
, sizeof(src
->so_offsets
));
857 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
858 if (!src
->shaders
[i
]) {
859 dst
->shaders
[i
] = NULL
;
863 if (src
->shaders
[i
]) {
864 dst
->shaders
[i
]->state
.shader
= src
->shaders
[i
]->state
.shader
;
865 if (src
->shaders
[i
]->state
.shader
.tokens
) {
866 dst
->shaders
[i
]->state
.shader
.tokens
=
867 tgsi_dup_tokens(src
->shaders
[i
]->state
.shader
.tokens
);
869 dst
->shaders
[i
]->state
.shader
.ir
.nir
= NULL
;
872 dst
->shaders
[i
] = NULL
;
875 for (j
= 0; j
< PIPE_MAX_CONSTANT_BUFFERS
; j
++) {
876 pipe_resource_reference(&dst
->constant_buffers
[i
][j
].buffer
,
877 src
->constant_buffers
[i
][j
].buffer
);
878 memcpy(&dst
->constant_buffers
[i
][j
], &src
->constant_buffers
[i
][j
],
879 sizeof(src
->constant_buffers
[i
][j
]));
882 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++) {
883 pipe_sampler_view_reference(&dst
->sampler_views
[i
][j
],
884 src
->sampler_views
[i
][j
]);
885 if (src
->sampler_states
[i
][j
])
886 dst
->sampler_states
[i
][j
]->state
.sampler
=
887 src
->sampler_states
[i
][j
]->state
.sampler
;
889 dst
->sampler_states
[i
][j
] = NULL
;
892 for (j
= 0; j
< PIPE_MAX_SHADER_IMAGES
; j
++) {
893 pipe_resource_reference(&dst
->shader_images
[i
][j
].resource
,
894 src
->shader_images
[i
][j
].resource
);
895 memcpy(&dst
->shader_images
[i
][j
], &src
->shader_images
[i
][j
],
896 sizeof(src
->shader_images
[i
][j
]));
899 for (j
= 0; j
< PIPE_MAX_SHADER_BUFFERS
; j
++) {
900 pipe_resource_reference(&dst
->shader_buffers
[i
][j
].buffer
,
901 src
->shader_buffers
[i
][j
].buffer
);
902 memcpy(&dst
->shader_buffers
[i
][j
], &src
->shader_buffers
[i
][j
],
903 sizeof(src
->shader_buffers
[i
][j
]));
908 dst
->velems
->state
.velems
= src
->velems
->state
.velems
;
913 dst
->rs
->state
.rs
= src
->rs
->state
.rs
;
918 dst
->dsa
->state
.dsa
= src
->dsa
->state
.dsa
;
923 dst
->blend
->state
.blend
= src
->blend
->state
.blend
;
927 dst
->blend_color
= src
->blend_color
;
928 dst
->stencil_ref
= src
->stencil_ref
;
929 dst
->sample_mask
= src
->sample_mask
;
930 dst
->min_samples
= src
->min_samples
;
931 dst
->clip_state
= src
->clip_state
;
932 util_copy_framebuffer_state(&dst
->framebuffer_state
, &src
->framebuffer_state
);
933 memcpy(dst
->scissors
, src
->scissors
, sizeof(src
->scissors
));
934 memcpy(dst
->viewports
, src
->viewports
, sizeof(src
->viewports
));
935 memcpy(dst
->tess_default_levels
, src
->tess_default_levels
,
936 sizeof(src
->tess_default_levels
));
937 dst
->apitrace_call_number
= src
->apitrace_call_number
;
941 dd_free_record(struct dd_draw_record
**record
)
943 struct dd_draw_record
*next
= (*record
)->next
;
945 u_log_page_destroy((*record
)->log_page
);
946 dd_unreference_copy_of_call(&(*record
)->call
);
947 dd_unreference_copy_of_draw_state(&(*record
)->draw_state
);
953 dd_dump_record(struct dd_context
*dctx
, struct dd_draw_record
*record
,
954 uint32_t hw_sequence_no
, int64_t now
)
956 FILE *f
= dd_get_file_stream(dd_screen(dctx
->base
.screen
),
957 record
->draw_state
.base
.apitrace_call_number
);
961 fprintf(f
, "Draw call sequence # = %u\n", record
->sequence_no
);
962 fprintf(f
, "HW reached sequence # = %u\n", hw_sequence_no
);
963 fprintf(f
, "Elapsed time = %"PRIi64
" ms\n\n",
964 (now
- record
->timestamp
) / 1000);
966 dd_dump_call(f
, &record
->draw_state
.base
, &record
->call
);
968 fprintf(f
,"\n\n**************************************************"
969 "***************************\n");
970 fprintf(f
, "Context Log:\n\n");
971 u_log_page_print(record
->log_page
, f
);
973 dctx
->pipe
->dump_debug_state(dctx
->pipe
, f
,
974 PIPE_DUMP_DEVICE_STATUS_REGISTERS
);
980 dd_thread_pipelined_hang_detect(void *input
)
982 struct dd_context
*dctx
= (struct dd_context
*)input
;
983 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
985 mtx_lock(&dctx
->mutex
);
987 while (!dctx
->kill_thread
) {
988 struct dd_draw_record
**record
= &dctx
->records
;
990 /* Loop over all records. */
994 /* If the fence has been signalled, release the record and all older
997 if (*dctx
->mapped_fence
>= (*record
)->sequence_no
) {
999 dd_free_record(record
);
1003 /* The fence hasn't been signalled. Check the timeout. */
1004 now
= os_time_get();
1005 if (os_time_timeout((*record
)->timestamp
,
1006 (*record
)->timestamp
+ dscreen
->timeout_ms
* 1000,
1008 fprintf(stderr
, "GPU hang detected.\n");
1010 /* Get the oldest unsignalled draw call. */
1011 while ((*record
)->next
&&
1012 *dctx
->mapped_fence
< (*record
)->next
->sequence_no
)
1013 record
= &(*record
)->next
;
1015 dd_dump_record(dctx
, *record
, *dctx
->mapped_fence
, now
);
1019 record
= &(*record
)->next
;
1022 /* Unlock and sleep before starting all over again. */
1023 mtx_unlock(&dctx
->mutex
);
1024 os_time_sleep(10000); /* 10 ms */
1025 mtx_lock(&dctx
->mutex
);
1028 /* Thread termination. */
1029 while (dctx
->records
)
1030 dd_free_record(&dctx
->records
);
1032 mtx_unlock(&dctx
->mutex
);
1037 dd_pipelined_process_draw(struct dd_context
*dctx
, struct dd_call
*call
)
1039 struct pipe_context
*pipe
= dctx
->pipe
;
1040 struct dd_draw_record
*record
;
1042 /* Make a record of the draw call. */
1043 record
= MALLOC_STRUCT(dd_draw_record
);
1047 /* Update the fence with the GPU.
1049 * radeonsi/clear_buffer waits in the command processor until shaders are
1050 * idle before writing to memory. That's a necessary condition for isolating
1053 dctx
->sequence_no
++;
1054 pipe
->clear_buffer(pipe
, dctx
->fence
, 0, 4, &dctx
->sequence_no
, 4);
1056 /* Initialize the record. */
1057 record
->timestamp
= os_time_get();
1058 record
->sequence_no
= dctx
->sequence_no
;
1059 record
->log_page
= u_log_new_page(&dctx
->log
);
1061 memset(&record
->call
, 0, sizeof(record
->call
));
1062 dd_copy_call(&record
->call
, call
);
1064 dd_init_copy_of_draw_state(&record
->draw_state
);
1065 dd_copy_draw_state(&record
->draw_state
.base
, &dctx
->draw_state
);
1067 /* Add the record to the list. */
1068 mtx_lock(&dctx
->mutex
);
1069 record
->next
= dctx
->records
;
1070 dctx
->records
= record
;
1071 mtx_unlock(&dctx
->mutex
);
1075 dd_context_flush(struct pipe_context
*_pipe
,
1076 struct pipe_fence_handle
**fence
, unsigned flags
)
1078 struct dd_context
*dctx
= dd_context(_pipe
);
1079 struct pipe_context
*pipe
= dctx
->pipe
;
1081 switch (dd_screen(dctx
->base
.screen
)->mode
) {
1082 case DD_DETECT_HANGS
:
1083 dd_flush_and_handle_hang(dctx
, fence
, flags
,
1084 "GPU hang detected in pipe->flush()");
1086 case DD_DETECT_HANGS_PIPELINED
: /* nothing to do here */
1087 case DD_DUMP_ALL_CALLS
:
1088 case DD_DUMP_APITRACE_CALL
:
1089 pipe
->flush(pipe
, fence
, flags
);
1097 dd_before_draw(struct dd_context
*dctx
)
1099 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1101 if (dscreen
->mode
== DD_DETECT_HANGS
&&
1102 !dscreen
->no_flush
&&
1103 dctx
->num_draw_calls
>= dscreen
->skip_count
)
1104 dd_flush_and_handle_hang(dctx
, NULL
, 0,
1105 "GPU hang most likely caused by internal "
1110 dd_after_draw(struct dd_context
*dctx
, struct dd_call
*call
)
1112 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1113 struct pipe_context
*pipe
= dctx
->pipe
;
1115 if (dctx
->num_draw_calls
>= dscreen
->skip_count
) {
1116 switch (dscreen
->mode
) {
1117 case DD_DETECT_HANGS
:
1118 if (!dscreen
->no_flush
&&
1119 dd_flush_and_check_hang(dctx
, NULL
, 0)) {
1120 dd_write_report(dctx
, call
,
1121 PIPE_DUMP_DEVICE_STATUS_REGISTERS
|
1122 PIPE_DUMP_CURRENT_STATES
|
1123 PIPE_DUMP_CURRENT_SHADERS
|
1124 PIPE_DUMP_LAST_COMMAND_BUFFER
,
1127 /* Terminate the process to prevent future hangs. */
1130 u_log_page_destroy(u_log_new_page(&dctx
->log
));
1133 case DD_DETECT_HANGS_PIPELINED
:
1134 dd_pipelined_process_draw(dctx
, call
);
1136 case DD_DUMP_ALL_CALLS
:
1137 if (!dscreen
->no_flush
)
1138 pipe
->flush(pipe
, NULL
, 0);
1139 dd_write_report(dctx
, call
,
1140 PIPE_DUMP_CURRENT_STATES
|
1141 PIPE_DUMP_CURRENT_SHADERS
|
1142 PIPE_DUMP_LAST_COMMAND_BUFFER
,
1145 case DD_DUMP_APITRACE_CALL
:
1146 if (dscreen
->apitrace_dump_call
==
1147 dctx
->draw_state
.apitrace_call_number
) {
1148 dd_write_report(dctx
, call
,
1149 PIPE_DUMP_CURRENT_STATES
|
1150 PIPE_DUMP_CURRENT_SHADERS
,
1152 /* No need to continue. */
1155 u_log_page_destroy(u_log_new_page(&dctx
->log
));
1163 ++dctx
->num_draw_calls
;
1164 if (dscreen
->skip_count
&& dctx
->num_draw_calls
% 10000 == 0)
1165 fprintf(stderr
, "Gallium debugger reached %u draw calls.\n",
1166 dctx
->num_draw_calls
);
1170 dd_context_draw_vbo(struct pipe_context
*_pipe
,
1171 const struct pipe_draw_info
*info
)
1173 struct dd_context
*dctx
= dd_context(_pipe
);
1174 struct pipe_context
*pipe
= dctx
->pipe
;
1175 struct dd_call call
;
1177 call
.type
= CALL_DRAW_VBO
;
1178 call
.info
.draw_vbo
.draw
= *info
;
1179 if (info
->indirect
) {
1180 call
.info
.draw_vbo
.indirect
= *info
->indirect
;
1181 call
.info
.draw_vbo
.draw
.indirect
= &call
.info
.draw_vbo
.indirect
;
1183 memset(&call
.info
.draw_vbo
.indirect
, 0, sizeof(*info
->indirect
));
1186 dd_before_draw(dctx
);
1187 pipe
->draw_vbo(pipe
, info
);
1188 dd_after_draw(dctx
, &call
);
1192 dd_context_launch_grid(struct pipe_context
*_pipe
,
1193 const struct pipe_grid_info
*info
)
1195 struct dd_context
*dctx
= dd_context(_pipe
);
1196 struct pipe_context
*pipe
= dctx
->pipe
;
1197 struct dd_call call
;
1199 call
.type
= CALL_LAUNCH_GRID
;
1200 call
.info
.launch_grid
= *info
;
1202 dd_before_draw(dctx
);
1203 pipe
->launch_grid(pipe
, info
);
1204 dd_after_draw(dctx
, &call
);
1208 dd_context_resource_copy_region(struct pipe_context
*_pipe
,
1209 struct pipe_resource
*dst
, unsigned dst_level
,
1210 unsigned dstx
, unsigned dsty
, unsigned dstz
,
1211 struct pipe_resource
*src
, unsigned src_level
,
1212 const struct pipe_box
*src_box
)
1214 struct dd_context
*dctx
= dd_context(_pipe
);
1215 struct pipe_context
*pipe
= dctx
->pipe
;
1216 struct dd_call call
;
1218 call
.type
= CALL_RESOURCE_COPY_REGION
;
1219 call
.info
.resource_copy_region
.dst
= dst
;
1220 call
.info
.resource_copy_region
.dst_level
= dst_level
;
1221 call
.info
.resource_copy_region
.dstx
= dstx
;
1222 call
.info
.resource_copy_region
.dsty
= dsty
;
1223 call
.info
.resource_copy_region
.dstz
= dstz
;
1224 call
.info
.resource_copy_region
.src
= src
;
1225 call
.info
.resource_copy_region
.src_level
= src_level
;
1226 call
.info
.resource_copy_region
.src_box
= *src_box
;
1228 dd_before_draw(dctx
);
1229 pipe
->resource_copy_region(pipe
,
1230 dst
, dst_level
, dstx
, dsty
, dstz
,
1231 src
, src_level
, src_box
);
1232 dd_after_draw(dctx
, &call
);
1236 dd_context_blit(struct pipe_context
*_pipe
, const struct pipe_blit_info
*info
)
1238 struct dd_context
*dctx
= dd_context(_pipe
);
1239 struct pipe_context
*pipe
= dctx
->pipe
;
1240 struct dd_call call
;
1242 call
.type
= CALL_BLIT
;
1243 call
.info
.blit
= *info
;
1245 dd_before_draw(dctx
);
1246 pipe
->blit(pipe
, info
);
1247 dd_after_draw(dctx
, &call
);
1251 dd_context_generate_mipmap(struct pipe_context
*_pipe
,
1252 struct pipe_resource
*res
,
1253 enum pipe_format format
,
1254 unsigned base_level
,
1255 unsigned last_level
,
1256 unsigned first_layer
,
1257 unsigned last_layer
)
1259 struct dd_context
*dctx
= dd_context(_pipe
);
1260 struct pipe_context
*pipe
= dctx
->pipe
;
1261 struct dd_call call
;
1264 call
.type
= CALL_GENERATE_MIPMAP
;
1265 call
.info
.generate_mipmap
.res
= res
;
1266 call
.info
.generate_mipmap
.format
= format
;
1267 call
.info
.generate_mipmap
.base_level
= base_level
;
1268 call
.info
.generate_mipmap
.last_level
= last_level
;
1269 call
.info
.generate_mipmap
.first_layer
= first_layer
;
1270 call
.info
.generate_mipmap
.last_layer
= last_layer
;
1272 dd_before_draw(dctx
);
1273 result
= pipe
->generate_mipmap(pipe
, res
, format
, base_level
, last_level
,
1274 first_layer
, last_layer
);
1275 dd_after_draw(dctx
, &call
);
1280 dd_context_get_query_result_resource(struct pipe_context
*_pipe
,
1281 struct pipe_query
*query
,
1283 enum pipe_query_value_type result_type
,
1285 struct pipe_resource
*resource
,
1288 struct dd_context
*dctx
= dd_context(_pipe
);
1289 struct dd_query
*dquery
= dd_query(query
);
1290 struct pipe_context
*pipe
= dctx
->pipe
;
1291 struct dd_call call
;
1293 call
.type
= CALL_GET_QUERY_RESULT_RESOURCE
;
1294 call
.info
.get_query_result_resource
.query
= query
;
1295 call
.info
.get_query_result_resource
.wait
= wait
;
1296 call
.info
.get_query_result_resource
.result_type
= result_type
;
1297 call
.info
.get_query_result_resource
.index
= index
;
1298 call
.info
.get_query_result_resource
.resource
= resource
;
1299 call
.info
.get_query_result_resource
.offset
= offset
;
1301 /* In pipelined mode, the query may be deleted by the time we need to
1304 call
.info
.get_query_result_resource
.query_type
= dquery
->type
;
1306 dd_before_draw(dctx
);
1307 pipe
->get_query_result_resource(pipe
, dquery
->query
, wait
,
1308 result_type
, index
, resource
, offset
);
1309 dd_after_draw(dctx
, &call
);
1313 dd_context_flush_resource(struct pipe_context
*_pipe
,
1314 struct pipe_resource
*resource
)
1316 struct dd_context
*dctx
= dd_context(_pipe
);
1317 struct pipe_context
*pipe
= dctx
->pipe
;
1318 struct dd_call call
;
1320 call
.type
= CALL_FLUSH_RESOURCE
;
1321 call
.info
.flush_resource
= resource
;
1323 dd_before_draw(dctx
);
1324 pipe
->flush_resource(pipe
, resource
);
1325 dd_after_draw(dctx
, &call
);
1329 dd_context_clear(struct pipe_context
*_pipe
, unsigned buffers
,
1330 const union pipe_color_union
*color
, double depth
,
1333 struct dd_context
*dctx
= dd_context(_pipe
);
1334 struct pipe_context
*pipe
= dctx
->pipe
;
1335 struct dd_call call
;
1337 call
.type
= CALL_CLEAR
;
1338 call
.info
.clear
.buffers
= buffers
;
1339 call
.info
.clear
.color
= *color
;
1340 call
.info
.clear
.depth
= depth
;
1341 call
.info
.clear
.stencil
= stencil
;
1343 dd_before_draw(dctx
);
1344 pipe
->clear(pipe
, buffers
, color
, depth
, stencil
);
1345 dd_after_draw(dctx
, &call
);
1349 dd_context_clear_render_target(struct pipe_context
*_pipe
,
1350 struct pipe_surface
*dst
,
1351 const union pipe_color_union
*color
,
1352 unsigned dstx
, unsigned dsty
,
1353 unsigned width
, unsigned height
,
1354 bool render_condition_enabled
)
1356 struct dd_context
*dctx
= dd_context(_pipe
);
1357 struct pipe_context
*pipe
= dctx
->pipe
;
1358 struct dd_call call
;
1360 call
.type
= CALL_CLEAR_RENDER_TARGET
;
1362 dd_before_draw(dctx
);
1363 pipe
->clear_render_target(pipe
, dst
, color
, dstx
, dsty
, width
, height
,
1364 render_condition_enabled
);
1365 dd_after_draw(dctx
, &call
);
1369 dd_context_clear_depth_stencil(struct pipe_context
*_pipe
,
1370 struct pipe_surface
*dst
, unsigned clear_flags
,
1371 double depth
, unsigned stencil
, unsigned dstx
,
1372 unsigned dsty
, unsigned width
, unsigned height
,
1373 bool render_condition_enabled
)
1375 struct dd_context
*dctx
= dd_context(_pipe
);
1376 struct pipe_context
*pipe
= dctx
->pipe
;
1377 struct dd_call call
;
1379 call
.type
= CALL_CLEAR_DEPTH_STENCIL
;
1381 dd_before_draw(dctx
);
1382 pipe
->clear_depth_stencil(pipe
, dst
, clear_flags
, depth
, stencil
,
1383 dstx
, dsty
, width
, height
,
1384 render_condition_enabled
);
1385 dd_after_draw(dctx
, &call
);
1389 dd_context_clear_buffer(struct pipe_context
*_pipe
, struct pipe_resource
*res
,
1390 unsigned offset
, unsigned size
,
1391 const void *clear_value
, int clear_value_size
)
1393 struct dd_context
*dctx
= dd_context(_pipe
);
1394 struct pipe_context
*pipe
= dctx
->pipe
;
1395 struct dd_call call
;
1397 call
.type
= CALL_CLEAR_BUFFER
;
1398 call
.info
.clear_buffer
.res
= res
;
1399 call
.info
.clear_buffer
.offset
= offset
;
1400 call
.info
.clear_buffer
.size
= size
;
1401 call
.info
.clear_buffer
.clear_value
= clear_value
;
1402 call
.info
.clear_buffer
.clear_value_size
= clear_value_size
;
1404 dd_before_draw(dctx
);
1405 pipe
->clear_buffer(pipe
, res
, offset
, size
, clear_value
, clear_value_size
);
1406 dd_after_draw(dctx
, &call
);
1410 dd_context_clear_texture(struct pipe_context
*_pipe
,
1411 struct pipe_resource
*res
,
1413 const struct pipe_box
*box
,
1416 struct dd_context
*dctx
= dd_context(_pipe
);
1417 struct pipe_context
*pipe
= dctx
->pipe
;
1418 struct dd_call call
;
1420 call
.type
= CALL_CLEAR_TEXTURE
;
1422 dd_before_draw(dctx
);
1423 pipe
->clear_texture(pipe
, res
, level
, box
, data
);
1424 dd_after_draw(dctx
, &call
);
1428 dd_init_draw_functions(struct dd_context
*dctx
)
1432 CTX_INIT(launch_grid
);
1433 CTX_INIT(resource_copy_region
);
1436 CTX_INIT(clear_render_target
);
1437 CTX_INIT(clear_depth_stencil
);
1438 CTX_INIT(clear_buffer
);
1439 CTX_INIT(clear_texture
);
1440 CTX_INIT(flush_resource
);
1441 CTX_INIT(generate_mipmap
);
1442 CTX_INIT(get_query_result_resource
);