1 /**************************************************************************
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "util/os_time.h"
40 #include "pipe/p_config.h"
44 dd_write_header(FILE *f
, struct pipe_screen
*screen
, unsigned apitrace_call_number
)
47 if (os_get_command_line(cmd_line
, sizeof(cmd_line
)))
48 fprintf(f
, "Command: %s\n", cmd_line
);
49 fprintf(f
, "Driver vendor: %s\n", screen
->get_vendor(screen
));
50 fprintf(f
, "Device vendor: %s\n", screen
->get_device_vendor(screen
));
51 fprintf(f
, "Device name: %s\n\n", screen
->get_name(screen
));
53 if (apitrace_call_number
)
54 fprintf(f
, "Last apitrace call: %u\n\n", apitrace_call_number
);
58 dd_get_file_stream(struct dd_screen
*dscreen
, unsigned apitrace_call_number
)
60 struct pipe_screen
*screen
= dscreen
->screen
;
62 FILE *f
= dd_get_debug_file(dscreen
->verbose
);
66 dd_write_header(f
, screen
, apitrace_call_number
);
/* Append the last 60 lines of the kernel log (dmesg) to the given stream.
 * Linux-only; a failed popen() is silently ignored (best effort).
 * NOTE(review): local declarations, the NULL check and the fputs/pclose
 * lines were lost in transcription and restored — verify against upstream. */
static void
dd_dump_dmesg(FILE *f)
{
#ifdef PIPE_OS_LINUX
   char line[2000];
   FILE *p = popen("dmesg | tail -n60", "r");

   if (!p)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(line, sizeof(line), p))
      fputs(line, f);

   pclose(p);
#endif
}
89 dd_num_active_viewports(struct dd_draw_state
*dstate
)
91 struct tgsi_shader_info info
;
92 const struct tgsi_token
*tokens
;
94 if (dstate
->shaders
[PIPE_SHADER_GEOMETRY
])
95 tokens
= dstate
->shaders
[PIPE_SHADER_GEOMETRY
]->state
.shader
.tokens
;
96 else if (dstate
->shaders
[PIPE_SHADER_TESS_EVAL
])
97 tokens
= dstate
->shaders
[PIPE_SHADER_TESS_EVAL
]->state
.shader
.tokens
;
98 else if (dstate
->shaders
[PIPE_SHADER_VERTEX
])
99 tokens
= dstate
->shaders
[PIPE_SHADER_VERTEX
]->state
.shader
.tokens
;
104 tgsi_scan_shader(tokens
, &info
);
105 if (info
.writes_viewport_index
)
106 return PIPE_MAX_VIEWPORTS
;
/* ANSI color escapes used to highlight state vs. shader sections in logs. */
#define COLOR_RESET	"\033[0m"
#define COLOR_SHADER	"\033[1;32m"
#define COLOR_STATE	"\033[1;33m"

/* The DUMP* macros expect a local "FILE *f" in the calling scope and dispatch
 * to util_dump_<name>(). NOTE(review): the macro tails ("} while(0)" and the
 * trailing newline fprintf) were lost in transcription and restored —
 * verify against upstream. */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump an indexed state object, e.g. one element of an array binding. */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a struct member by value. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, "  " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a struct member by address (for dumpers taking a pointer). */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, "  " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a named value: "name = <value>". */
#define PRINT_NAMED(type, name, value) \
   do { \
      fprintf(f, COLOR_STATE "%s" COLOR_RESET " = ", name); \
      util_dump_##type(f, value); \
      fprintf(f, "\n"); \
   } while (0)
/* Print an unsigned integer in decimal (helper for the DUMP* macros). */
static void
util_dump_uint(FILE *f, unsigned i)
{
   fprintf(f, "%u", i);
}
/* Print a signed integer in decimal (helper for the DUMP* macros). */
static void
util_dump_int(FILE *f, int i)
{
   fprintf(f, "%i", i);
}
/* Print an unsigned integer as 0x-prefixed hex (helper for the DUMP* macros). */
static void
util_dump_hex(FILE *f, unsigned i)
{
   fprintf(f, "0x%x", i);
}
/* Print a double with the default %f precision (helper for the DUMP* macros). */
static void
util_dump_double(FILE *f, double d)
{
   fprintf(f, "%f", d);
}
172 util_dump_format(FILE *f
, enum pipe_format format
)
174 fprintf(f
, "%s", util_format_name(format
));
178 util_dump_color_union(FILE *f
, const union pipe_color_union
*color
)
180 fprintf(f
, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
181 color
->f
[0], color
->f
[1], color
->f
[2], color
->f
[3],
182 color
->ui
[0], color
->ui
[1], color
->ui
[2], color
->ui
[3]);
186 dd_dump_render_condition(struct dd_draw_state
*dstate
, FILE *f
)
188 if (dstate
->render_cond
.query
) {
189 fprintf(f
, "render condition:\n");
190 DUMP_M(query_type
, &dstate
->render_cond
, query
->type
);
191 DUMP_M(uint
, &dstate
->render_cond
, condition
);
192 DUMP_M(uint
, &dstate
->render_cond
, mode
);
198 dd_dump_shader(struct dd_draw_state
*dstate
, enum pipe_shader_type sh
, FILE *f
)
201 const char *shader_str
[PIPE_SHADER_TYPES
];
203 shader_str
[PIPE_SHADER_VERTEX
] = "VERTEX";
204 shader_str
[PIPE_SHADER_TESS_CTRL
] = "TESS_CTRL";
205 shader_str
[PIPE_SHADER_TESS_EVAL
] = "TESS_EVAL";
206 shader_str
[PIPE_SHADER_GEOMETRY
] = "GEOMETRY";
207 shader_str
[PIPE_SHADER_FRAGMENT
] = "FRAGMENT";
208 shader_str
[PIPE_SHADER_COMPUTE
] = "COMPUTE";
210 if (sh
== PIPE_SHADER_TESS_CTRL
&&
211 !dstate
->shaders
[PIPE_SHADER_TESS_CTRL
] &&
212 dstate
->shaders
[PIPE_SHADER_TESS_EVAL
])
213 fprintf(f
, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
214 "default_inner_level = {%f, %f}}\n",
215 dstate
->tess_default_levels
[0],
216 dstate
->tess_default_levels
[1],
217 dstate
->tess_default_levels
[2],
218 dstate
->tess_default_levels
[3],
219 dstate
->tess_default_levels
[4],
220 dstate
->tess_default_levels
[5]);
222 if (sh
== PIPE_SHADER_FRAGMENT
)
224 unsigned num_viewports
= dd_num_active_viewports(dstate
);
226 if (dstate
->rs
->state
.rs
.clip_plane_enable
)
227 DUMP(clip_state
, &dstate
->clip_state
);
229 for (i
= 0; i
< num_viewports
; i
++)
230 DUMP_I(viewport_state
, &dstate
->viewports
[i
], i
);
232 if (dstate
->rs
->state
.rs
.scissor
)
233 for (i
= 0; i
< num_viewports
; i
++)
234 DUMP_I(scissor_state
, &dstate
->scissors
[i
], i
);
236 DUMP(rasterizer_state
, &dstate
->rs
->state
.rs
);
238 if (dstate
->rs
->state
.rs
.poly_stipple_enable
)
239 DUMP(poly_stipple
, &dstate
->polygon_stipple
);
243 if (!dstate
->shaders
[sh
])
246 fprintf(f
, COLOR_SHADER
"begin shader: %s" COLOR_RESET
"\n", shader_str
[sh
]);
247 DUMP(shader_state
, &dstate
->shaders
[sh
]->state
.shader
);
249 for (i
= 0; i
< PIPE_MAX_CONSTANT_BUFFERS
; i
++)
250 if (dstate
->constant_buffers
[sh
][i
].buffer
||
251 dstate
->constant_buffers
[sh
][i
].user_buffer
) {
252 DUMP_I(constant_buffer
, &dstate
->constant_buffers
[sh
][i
], i
);
253 if (dstate
->constant_buffers
[sh
][i
].buffer
)
254 DUMP_M(resource
, &dstate
->constant_buffers
[sh
][i
], buffer
);
257 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++)
258 if (dstate
->sampler_states
[sh
][i
])
259 DUMP_I(sampler_state
, &dstate
->sampler_states
[sh
][i
]->state
.sampler
, i
);
261 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++)
262 if (dstate
->sampler_views
[sh
][i
]) {
263 DUMP_I(sampler_view
, dstate
->sampler_views
[sh
][i
], i
);
264 DUMP_M(resource
, dstate
->sampler_views
[sh
][i
], texture
);
267 for (i
= 0; i
< PIPE_MAX_SHADER_IMAGES
; i
++)
268 if (dstate
->shader_images
[sh
][i
].resource
) {
269 DUMP_I(image_view
, &dstate
->shader_images
[sh
][i
], i
);
270 if (dstate
->shader_images
[sh
][i
].resource
)
271 DUMP_M(resource
, &dstate
->shader_images
[sh
][i
], resource
);
274 for (i
= 0; i
< PIPE_MAX_SHADER_BUFFERS
; i
++)
275 if (dstate
->shader_buffers
[sh
][i
].buffer
) {
276 DUMP_I(shader_buffer
, &dstate
->shader_buffers
[sh
][i
], i
);
277 if (dstate
->shader_buffers
[sh
][i
].buffer
)
278 DUMP_M(resource
, &dstate
->shader_buffers
[sh
][i
], buffer
);
281 fprintf(f
, COLOR_SHADER
"end shader: %s" COLOR_RESET
"\n\n", shader_str
[sh
]);
285 dd_dump_draw_vbo(struct dd_draw_state
*dstate
, struct pipe_draw_info
*info
, FILE *f
)
289 DUMP(draw_info
, info
);
290 if (info
->count_from_stream_output
)
291 DUMP_M(stream_output_target
, info
,
292 count_from_stream_output
);
293 if (info
->indirect
) {
294 DUMP_M(resource
, info
, indirect
->buffer
);
295 if (info
->indirect
->indirect_draw_count
)
296 DUMP_M(resource
, info
, indirect
->indirect_draw_count
);
301 /* TODO: dump active queries */
303 dd_dump_render_condition(dstate
, f
);
305 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; i
++)
306 if (dstate
->vertex_buffers
[i
].buffer
.resource
) {
307 DUMP_I(vertex_buffer
, &dstate
->vertex_buffers
[i
], i
);
308 if (!dstate
->vertex_buffers
[i
].is_user_buffer
)
309 DUMP_M(resource
, &dstate
->vertex_buffers
[i
], buffer
.resource
);
312 if (dstate
->velems
) {
313 PRINT_NAMED(uint
, "num vertex elements",
314 dstate
->velems
->state
.velems
.count
);
315 for (i
= 0; i
< dstate
->velems
->state
.velems
.count
; i
++) {
317 DUMP_I(vertex_element
, &dstate
->velems
->state
.velems
.velems
[i
], i
);
321 PRINT_NAMED(uint
, "num stream output targets", dstate
->num_so_targets
);
322 for (i
= 0; i
< dstate
->num_so_targets
; i
++)
323 if (dstate
->so_targets
[i
]) {
324 DUMP_I(stream_output_target
, dstate
->so_targets
[i
], i
);
325 DUMP_M(resource
, dstate
->so_targets
[i
], buffer
);
326 fprintf(f
, " offset = %i\n", dstate
->so_offsets
[i
]);
330 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
331 if (sh
== PIPE_SHADER_COMPUTE
)
334 dd_dump_shader(dstate
, sh
, f
);
338 DUMP(depth_stencil_alpha_state
, &dstate
->dsa
->state
.dsa
);
339 DUMP(stencil_ref
, &dstate
->stencil_ref
);
342 DUMP(blend_state
, &dstate
->blend
->state
.blend
);
343 DUMP(blend_color
, &dstate
->blend_color
);
345 PRINT_NAMED(uint
, "min_samples", dstate
->min_samples
);
346 PRINT_NAMED(hex
, "sample_mask", dstate
->sample_mask
);
349 DUMP(framebuffer_state
, &dstate
->framebuffer_state
);
350 for (i
= 0; i
< dstate
->framebuffer_state
.nr_cbufs
; i
++)
351 if (dstate
->framebuffer_state
.cbufs
[i
]) {
352 fprintf(f
, " " COLOR_STATE
"cbufs[%i]:" COLOR_RESET
"\n ", i
);
353 DUMP(surface
, dstate
->framebuffer_state
.cbufs
[i
]);
355 DUMP(resource
, dstate
->framebuffer_state
.cbufs
[i
]->texture
);
357 if (dstate
->framebuffer_state
.zsbuf
) {
358 fprintf(f
, " " COLOR_STATE
"zsbuf:" COLOR_RESET
"\n ");
359 DUMP(surface
, dstate
->framebuffer_state
.zsbuf
);
361 DUMP(resource
, dstate
->framebuffer_state
.zsbuf
->texture
);
367 dd_dump_launch_grid(struct dd_draw_state
*dstate
, struct pipe_grid_info
*info
, FILE *f
)
369 fprintf(f
, "%s:\n", __func__
+8);
370 DUMP(grid_info
, info
);
373 dd_dump_shader(dstate
, PIPE_SHADER_COMPUTE
, f
);
378 dd_dump_resource_copy_region(struct dd_draw_state
*dstate
,
379 struct call_resource_copy_region
*info
,
382 fprintf(f
, "%s:\n", __func__
+8);
383 DUMP_M(resource
, info
, dst
);
384 DUMP_M(uint
, info
, dst_level
);
385 DUMP_M(uint
, info
, dstx
);
386 DUMP_M(uint
, info
, dsty
);
387 DUMP_M(uint
, info
, dstz
);
388 DUMP_M(resource
, info
, src
);
389 DUMP_M(uint
, info
, src_level
);
390 DUMP_M_ADDR(box
, info
, src_box
);
394 dd_dump_blit(struct dd_draw_state
*dstate
, struct pipe_blit_info
*info
, FILE *f
)
396 fprintf(f
, "%s:\n", __func__
+8);
397 DUMP_M(resource
, info
, dst
.resource
);
398 DUMP_M(uint
, info
, dst
.level
);
399 DUMP_M_ADDR(box
, info
, dst
.box
);
400 DUMP_M(format
, info
, dst
.format
);
402 DUMP_M(resource
, info
, src
.resource
);
403 DUMP_M(uint
, info
, src
.level
);
404 DUMP_M_ADDR(box
, info
, src
.box
);
405 DUMP_M(format
, info
, src
.format
);
407 DUMP_M(hex
, info
, mask
);
408 DUMP_M(uint
, info
, filter
);
409 DUMP_M(uint
, info
, scissor_enable
);
410 DUMP_M_ADDR(scissor_state
, info
, scissor
);
411 DUMP_M(uint
, info
, render_condition_enable
);
413 if (info
->render_condition_enable
)
414 dd_dump_render_condition(dstate
, f
);
/* Dump a generate_mipmap call. Parameter dumping is not implemented yet;
 * only the call name is logged. */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
425 dd_dump_get_query_result_resource(struct call_get_query_result_resource
*info
, FILE *f
)
427 fprintf(f
, "%s:\n", __func__
+ 8);
428 DUMP_M(query_type
, info
, query_type
);
429 DUMP_M(uint
, info
, wait
);
430 DUMP_M(query_value_type
, info
, result_type
);
431 DUMP_M(int, info
, index
);
432 DUMP_M(resource
, info
, resource
);
433 DUMP_M(uint
, info
, offset
);
437 dd_dump_flush_resource(struct dd_draw_state
*dstate
, struct pipe_resource
*res
,
440 fprintf(f
, "%s:\n", __func__
+8);
445 dd_dump_clear(struct dd_draw_state
*dstate
, struct call_clear
*info
, FILE *f
)
447 fprintf(f
, "%s:\n", __func__
+8);
448 DUMP_M(uint
, info
, buffers
);
449 DUMP_M_ADDR(color_union
, info
, color
);
450 DUMP_M(double, info
, depth
);
451 DUMP_M(hex
, info
, stencil
);
455 dd_dump_clear_buffer(struct dd_draw_state
*dstate
, struct call_clear_buffer
*info
,
459 const char *value
= (const char*)info
->clear_value
;
461 fprintf(f
, "%s:\n", __func__
+8);
462 DUMP_M(resource
, info
, res
);
463 DUMP_M(uint
, info
, offset
);
464 DUMP_M(uint
, info
, size
);
465 DUMP_M(uint
, info
, clear_value_size
);
467 fprintf(f
, " clear_value:");
468 for (i
= 0; i
< info
->clear_value_size
; i
++)
469 fprintf(f
, " %02x", value
[i
]);
474 dd_dump_transfer_map(struct call_transfer_map
*info
, FILE *f
)
476 fprintf(f
, "%s:\n", __func__
+8);
477 DUMP_M_ADDR(transfer
, info
, transfer
);
478 DUMP_M(ptr
, info
, transfer_ptr
);
479 DUMP_M(ptr
, info
, ptr
);
483 dd_dump_transfer_flush_region(struct call_transfer_flush_region
*info
, FILE *f
)
485 fprintf(f
, "%s:\n", __func__
+8);
486 DUMP_M_ADDR(transfer
, info
, transfer
);
487 DUMP_M(ptr
, info
, transfer_ptr
);
488 DUMP_M_ADDR(box
, info
, box
);
492 dd_dump_transfer_unmap(struct call_transfer_unmap
*info
, FILE *f
)
494 fprintf(f
, "%s:\n", __func__
+8);
495 DUMP_M_ADDR(transfer
, info
, transfer
);
496 DUMP_M(ptr
, info
, transfer_ptr
);
500 dd_dump_buffer_subdata(struct call_buffer_subdata
*info
, FILE *f
)
502 fprintf(f
, "%s:\n", __func__
+8);
503 DUMP_M(resource
, info
, resource
);
504 DUMP_M(transfer_usage
, info
, usage
);
505 DUMP_M(uint
, info
, offset
);
506 DUMP_M(uint
, info
, size
);
507 DUMP_M(ptr
, info
, data
);
511 dd_dump_texture_subdata(struct call_texture_subdata
*info
, FILE *f
)
513 fprintf(f
, "%s:\n", __func__
+8);
514 DUMP_M(resource
, info
, resource
);
515 DUMP_M(uint
, info
, level
);
516 DUMP_M(transfer_usage
, info
, usage
);
517 DUMP_M_ADDR(box
, info
, box
);
518 DUMP_M(ptr
, info
, data
);
519 DUMP_M(uint
, info
, stride
);
520 DUMP_M(uint
, info
, layer_stride
);
/* Dump a clear_texture call. Parameter dumping is not implemented yet;
 * only the call name is logged. */
static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
/* Dump a clear_render_target call. Parameter dumping is not implemented yet;
 * only the call name is logged. */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
/* Dump a clear_depth_stencil call. Parameter dumping is not implemented yet;
 * only the call name is logged. */
static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
545 dd_dump_driver_state(struct dd_context
*dctx
, FILE *f
, unsigned flags
)
547 if (dctx
->pipe
->dump_debug_state
) {
548 fprintf(f
,"\n\n**************************************************"
549 "***************************\n");
550 fprintf(f
, "Driver-specific state:\n\n");
551 dctx
->pipe
->dump_debug_state(dctx
->pipe
, f
, flags
);
556 dd_dump_call(FILE *f
, struct dd_draw_state
*state
, struct dd_call
*call
)
558 switch (call
->type
) {
560 dd_dump_draw_vbo(state
, &call
->info
.draw_vbo
.draw
, f
);
562 case CALL_LAUNCH_GRID
:
563 dd_dump_launch_grid(state
, &call
->info
.launch_grid
, f
);
565 case CALL_RESOURCE_COPY_REGION
:
566 dd_dump_resource_copy_region(state
,
567 &call
->info
.resource_copy_region
, f
);
570 dd_dump_blit(state
, &call
->info
.blit
, f
);
572 case CALL_FLUSH_RESOURCE
:
573 dd_dump_flush_resource(state
, call
->info
.flush_resource
, f
);
576 dd_dump_clear(state
, &call
->info
.clear
, f
);
578 case CALL_CLEAR_BUFFER
:
579 dd_dump_clear_buffer(state
, &call
->info
.clear_buffer
, f
);
581 case CALL_CLEAR_TEXTURE
:
582 dd_dump_clear_texture(state
, f
);
584 case CALL_CLEAR_RENDER_TARGET
:
585 dd_dump_clear_render_target(state
, f
);
587 case CALL_CLEAR_DEPTH_STENCIL
:
588 dd_dump_clear_depth_stencil(state
, f
);
590 case CALL_GENERATE_MIPMAP
:
591 dd_dump_generate_mipmap(state
, f
);
593 case CALL_GET_QUERY_RESULT_RESOURCE
:
594 dd_dump_get_query_result_resource(&call
->info
.get_query_result_resource
, f
);
596 case CALL_TRANSFER_MAP
:
597 dd_dump_transfer_map(&call
->info
.transfer_map
, f
);
599 case CALL_TRANSFER_FLUSH_REGION
:
600 dd_dump_transfer_flush_region(&call
->info
.transfer_flush_region
, f
);
602 case CALL_TRANSFER_UNMAP
:
603 dd_dump_transfer_unmap(&call
->info
.transfer_unmap
, f
);
605 case CALL_BUFFER_SUBDATA
:
606 dd_dump_buffer_subdata(&call
->info
.buffer_subdata
, f
);
608 case CALL_TEXTURE_SUBDATA
:
609 dd_dump_texture_subdata(&call
->info
.texture_subdata
, f
);
615 dd_kill_process(void)
620 fprintf(stderr
, "dd: Aborting the process...\n");
627 dd_unreference_copy_of_call(struct dd_call
*dst
)
631 pipe_so_target_reference(&dst
->info
.draw_vbo
.draw
.count_from_stream_output
, NULL
);
632 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
.buffer
, NULL
);
633 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
.indirect_draw_count
, NULL
);
634 if (dst
->info
.draw_vbo
.draw
.index_size
&&
635 !dst
->info
.draw_vbo
.draw
.has_user_indices
)
636 pipe_resource_reference(&dst
->info
.draw_vbo
.draw
.index
.resource
, NULL
);
638 dst
->info
.draw_vbo
.draw
.index
.user
= NULL
;
640 case CALL_LAUNCH_GRID
:
641 pipe_resource_reference(&dst
->info
.launch_grid
.indirect
, NULL
);
643 case CALL_RESOURCE_COPY_REGION
:
644 pipe_resource_reference(&dst
->info
.resource_copy_region
.dst
, NULL
);
645 pipe_resource_reference(&dst
->info
.resource_copy_region
.src
, NULL
);
648 pipe_resource_reference(&dst
->info
.blit
.dst
.resource
, NULL
);
649 pipe_resource_reference(&dst
->info
.blit
.src
.resource
, NULL
);
651 case CALL_FLUSH_RESOURCE
:
652 pipe_resource_reference(&dst
->info
.flush_resource
, NULL
);
656 case CALL_CLEAR_BUFFER
:
657 pipe_resource_reference(&dst
->info
.clear_buffer
.res
, NULL
);
659 case CALL_CLEAR_TEXTURE
:
661 case CALL_CLEAR_RENDER_TARGET
:
663 case CALL_CLEAR_DEPTH_STENCIL
:
665 case CALL_GENERATE_MIPMAP
:
666 pipe_resource_reference(&dst
->info
.generate_mipmap
.res
, NULL
);
668 case CALL_GET_QUERY_RESULT_RESOURCE
:
669 pipe_resource_reference(&dst
->info
.get_query_result_resource
.resource
, NULL
);
671 case CALL_TRANSFER_MAP
:
672 pipe_resource_reference(&dst
->info
.transfer_map
.transfer
.resource
, NULL
);
674 case CALL_TRANSFER_FLUSH_REGION
:
675 pipe_resource_reference(&dst
->info
.transfer_flush_region
.transfer
.resource
, NULL
);
677 case CALL_TRANSFER_UNMAP
:
678 pipe_resource_reference(&dst
->info
.transfer_unmap
.transfer
.resource
, NULL
);
680 case CALL_BUFFER_SUBDATA
:
681 pipe_resource_reference(&dst
->info
.buffer_subdata
.resource
, NULL
);
683 case CALL_TEXTURE_SUBDATA
:
684 pipe_resource_reference(&dst
->info
.texture_subdata
.resource
, NULL
);
690 dd_init_copy_of_draw_state(struct dd_draw_state_copy
*state
)
694 /* Just clear pointers to gallium objects. Don't clear the whole structure,
695 * because it would kill performance with its size of 130 KB.
697 memset(state
->base
.vertex_buffers
, 0,
698 sizeof(state
->base
.vertex_buffers
));
699 memset(state
->base
.so_targets
, 0,
700 sizeof(state
->base
.so_targets
));
701 memset(state
->base
.constant_buffers
, 0,
702 sizeof(state
->base
.constant_buffers
));
703 memset(state
->base
.sampler_views
, 0,
704 sizeof(state
->base
.sampler_views
));
705 memset(state
->base
.shader_images
, 0,
706 sizeof(state
->base
.shader_images
));
707 memset(state
->base
.shader_buffers
, 0,
708 sizeof(state
->base
.shader_buffers
));
709 memset(&state
->base
.framebuffer_state
, 0,
710 sizeof(state
->base
.framebuffer_state
));
712 memset(state
->shaders
, 0, sizeof(state
->shaders
));
714 state
->base
.render_cond
.query
= &state
->render_cond
;
716 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
717 state
->base
.shaders
[i
] = &state
->shaders
[i
];
718 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++)
719 state
->base
.sampler_states
[i
][j
] = &state
->sampler_states
[i
][j
];
722 state
->base
.velems
= &state
->velems
;
723 state
->base
.rs
= &state
->rs
;
724 state
->base
.dsa
= &state
->dsa
;
725 state
->base
.blend
= &state
->blend
;
729 dd_unreference_copy_of_draw_state(struct dd_draw_state_copy
*state
)
731 struct dd_draw_state
*dst
= &state
->base
;
734 for (i
= 0; i
< ARRAY_SIZE(dst
->vertex_buffers
); i
++)
735 pipe_vertex_buffer_unreference(&dst
->vertex_buffers
[i
]);
736 for (i
= 0; i
< ARRAY_SIZE(dst
->so_targets
); i
++)
737 pipe_so_target_reference(&dst
->so_targets
[i
], NULL
);
739 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
741 tgsi_free_tokens(dst
->shaders
[i
]->state
.shader
.tokens
);
743 for (j
= 0; j
< PIPE_MAX_CONSTANT_BUFFERS
; j
++)
744 pipe_resource_reference(&dst
->constant_buffers
[i
][j
].buffer
, NULL
);
745 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++)
746 pipe_sampler_view_reference(&dst
->sampler_views
[i
][j
], NULL
);
747 for (j
= 0; j
< PIPE_MAX_SHADER_IMAGES
; j
++)
748 pipe_resource_reference(&dst
->shader_images
[i
][j
].resource
, NULL
);
749 for (j
= 0; j
< PIPE_MAX_SHADER_BUFFERS
; j
++)
750 pipe_resource_reference(&dst
->shader_buffers
[i
][j
].buffer
, NULL
);
753 util_unreference_framebuffer_state(&dst
->framebuffer_state
);
757 dd_copy_draw_state(struct dd_draw_state
*dst
, struct dd_draw_state
*src
)
761 if (src
->render_cond
.query
) {
762 *dst
->render_cond
.query
= *src
->render_cond
.query
;
763 dst
->render_cond
.condition
= src
->render_cond
.condition
;
764 dst
->render_cond
.mode
= src
->render_cond
.mode
;
766 dst
->render_cond
.query
= NULL
;
769 for (i
= 0; i
< ARRAY_SIZE(src
->vertex_buffers
); i
++) {
770 pipe_vertex_buffer_reference(&dst
->vertex_buffers
[i
],
771 &src
->vertex_buffers
[i
]);
774 dst
->num_so_targets
= src
->num_so_targets
;
775 for (i
= 0; i
< src
->num_so_targets
; i
++)
776 pipe_so_target_reference(&dst
->so_targets
[i
], src
->so_targets
[i
]);
777 memcpy(dst
->so_offsets
, src
->so_offsets
, sizeof(src
->so_offsets
));
779 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
780 if (!src
->shaders
[i
]) {
781 dst
->shaders
[i
] = NULL
;
785 if (src
->shaders
[i
]) {
786 dst
->shaders
[i
]->state
.shader
= src
->shaders
[i
]->state
.shader
;
787 if (src
->shaders
[i
]->state
.shader
.tokens
) {
788 dst
->shaders
[i
]->state
.shader
.tokens
=
789 tgsi_dup_tokens(src
->shaders
[i
]->state
.shader
.tokens
);
791 dst
->shaders
[i
]->state
.shader
.ir
.nir
= NULL
;
794 dst
->shaders
[i
] = NULL
;
797 for (j
= 0; j
< PIPE_MAX_CONSTANT_BUFFERS
; j
++) {
798 pipe_resource_reference(&dst
->constant_buffers
[i
][j
].buffer
,
799 src
->constant_buffers
[i
][j
].buffer
);
800 memcpy(&dst
->constant_buffers
[i
][j
], &src
->constant_buffers
[i
][j
],
801 sizeof(src
->constant_buffers
[i
][j
]));
804 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++) {
805 pipe_sampler_view_reference(&dst
->sampler_views
[i
][j
],
806 src
->sampler_views
[i
][j
]);
807 if (src
->sampler_states
[i
][j
])
808 dst
->sampler_states
[i
][j
]->state
.sampler
=
809 src
->sampler_states
[i
][j
]->state
.sampler
;
811 dst
->sampler_states
[i
][j
] = NULL
;
814 for (j
= 0; j
< PIPE_MAX_SHADER_IMAGES
; j
++) {
815 pipe_resource_reference(&dst
->shader_images
[i
][j
].resource
,
816 src
->shader_images
[i
][j
].resource
);
817 memcpy(&dst
->shader_images
[i
][j
], &src
->shader_images
[i
][j
],
818 sizeof(src
->shader_images
[i
][j
]));
821 for (j
= 0; j
< PIPE_MAX_SHADER_BUFFERS
; j
++) {
822 pipe_resource_reference(&dst
->shader_buffers
[i
][j
].buffer
,
823 src
->shader_buffers
[i
][j
].buffer
);
824 memcpy(&dst
->shader_buffers
[i
][j
], &src
->shader_buffers
[i
][j
],
825 sizeof(src
->shader_buffers
[i
][j
]));
830 dst
->velems
->state
.velems
= src
->velems
->state
.velems
;
835 dst
->rs
->state
.rs
= src
->rs
->state
.rs
;
840 dst
->dsa
->state
.dsa
= src
->dsa
->state
.dsa
;
845 dst
->blend
->state
.blend
= src
->blend
->state
.blend
;
849 dst
->blend_color
= src
->blend_color
;
850 dst
->stencil_ref
= src
->stencil_ref
;
851 dst
->sample_mask
= src
->sample_mask
;
852 dst
->min_samples
= src
->min_samples
;
853 dst
->clip_state
= src
->clip_state
;
854 util_copy_framebuffer_state(&dst
->framebuffer_state
, &src
->framebuffer_state
);
855 memcpy(dst
->scissors
, src
->scissors
, sizeof(src
->scissors
));
856 memcpy(dst
->viewports
, src
->viewports
, sizeof(src
->viewports
));
857 memcpy(dst
->tess_default_levels
, src
->tess_default_levels
,
858 sizeof(src
->tess_default_levels
));
859 dst
->apitrace_call_number
= src
->apitrace_call_number
;
863 dd_free_record(struct pipe_screen
*screen
, struct dd_draw_record
*record
)
865 u_log_page_destroy(record
->log_page
);
866 dd_unreference_copy_of_call(&record
->call
);
867 dd_unreference_copy_of_draw_state(&record
->draw_state
);
868 screen
->fence_reference(screen
, &record
->prev_bottom_of_pipe
, NULL
);
869 screen
->fence_reference(screen
, &record
->top_of_pipe
, NULL
);
870 screen
->fence_reference(screen
, &record
->bottom_of_pipe
, NULL
);
871 util_queue_fence_destroy(&record
->driver_finished
);
876 dd_write_record(FILE *f
, struct dd_draw_record
*record
)
878 PRINT_NAMED(ptr
, "pipe", record
->dctx
->pipe
);
879 PRINT_NAMED(ns
, "time before (API call)", record
->time_before
);
880 PRINT_NAMED(ns
, "time after (driver done)", record
->time_after
);
883 dd_dump_call(f
, &record
->draw_state
.base
, &record
->call
);
885 if (record
->log_page
) {
886 fprintf(f
,"\n\n**************************************************"
887 "***************************\n");
888 fprintf(f
, "Context Log:\n\n");
889 u_log_page_print(record
->log_page
, f
);
894 dd_maybe_dump_record(struct dd_screen
*dscreen
, struct dd_draw_record
*record
)
896 if (dscreen
->dump_mode
== DD_DUMP_ONLY_HANGS
||
897 (dscreen
->dump_mode
== DD_DUMP_APITRACE_CALL
&&
898 dscreen
->apitrace_dump_call
!= record
->draw_state
.base
.apitrace_call_number
))
902 dd_get_debug_filename_and_mkdir(name
, sizeof(name
), dscreen
->verbose
);
903 FILE *f
= fopen(name
, "w");
905 fprintf(stderr
, "dd: failed to open %s\n", name
);
909 dd_write_header(f
, dscreen
->screen
, record
->draw_state
.base
.apitrace_call_number
);
910 dd_write_record(f
, record
);
916 dd_fence_state(struct pipe_screen
*screen
, struct pipe_fence_handle
*fence
,
922 bool ok
= screen
->fence_finish(screen
, NULL
, fence
, 0);
924 if (not_reached
&& !ok
)
927 return ok
? "YES" : "NO ";
931 dd_report_hang(struct dd_context
*dctx
)
933 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
934 struct pipe_screen
*screen
= dscreen
->screen
;
935 bool encountered_hang
= false;
936 bool stop_output
= false;
937 unsigned num_later
= 0;
939 fprintf(stderr
, "GPU hang detected, collecting information...\n\n");
941 fprintf(stderr
, "Draw # driver prev BOP TOP BOP dump file\n"
942 "-------------------------------------------------------------\n");
944 list_for_each_entry(struct dd_draw_record
, record
, &dctx
->records
, list
) {
945 if (!encountered_hang
&&
946 screen
->fence_finish(screen
, NULL
, record
->bottom_of_pipe
, 0)) {
947 dd_maybe_dump_record(dscreen
, record
);
952 dd_maybe_dump_record(dscreen
, record
);
957 bool driver
= util_queue_fence_is_signalled(&record
->driver_finished
);
958 bool top_not_reached
= false;
959 const char *prev_bop
= dd_fence_state(screen
, record
->prev_bottom_of_pipe
, NULL
);
960 const char *top
= dd_fence_state(screen
, record
->top_of_pipe
, &top_not_reached
);
961 const char *bop
= dd_fence_state(screen
, record
->bottom_of_pipe
, NULL
);
963 fprintf(stderr
, "%-9u %s %s %s %s ",
964 record
->draw_call
, driver
? "YES" : "NO ", prev_bop
, top
, bop
);
967 dd_get_debug_filename_and_mkdir(name
, sizeof(name
), false);
969 FILE *f
= fopen(name
, "w");
971 fprintf(stderr
, "fopen failed\n");
973 fprintf(stderr
, "%s\n", name
);
975 dd_write_header(f
, dscreen
->screen
, record
->draw_state
.base
.apitrace_call_number
);
976 dd_write_record(f
, record
);
978 if (!encountered_hang
) {
979 dd_dump_driver_state(dctx
, f
, PIPE_DUMP_DEVICE_STATUS_REGISTERS
);
988 encountered_hang
= true;
992 fprintf(stderr
, "... and %u additional draws.\n", num_later
);
994 fprintf(stderr
, "\nDone.\n");
999 dd_thread_main(void *input
)
1001 struct dd_context
*dctx
= (struct dd_context
*)input
;
1002 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1003 struct pipe_screen
*screen
= dscreen
->screen
;
1005 mtx_lock(&dctx
->mutex
);
1008 struct list_head records
;
1009 list_replace(&dctx
->records
, &records
);
1010 list_inithead(&dctx
->records
);
1011 dctx
->num_records
= 0;
1013 if (dctx
->api_stalled
)
1014 cnd_signal(&dctx
->cond
);
1016 if (list_empty(&records
)) {
1017 if (dctx
->kill_thread
)
1020 cnd_wait(&dctx
->cond
, &dctx
->mutex
);
1024 mtx_unlock(&dctx
->mutex
);
1026 /* Wait for the youngest draw. This means hangs can take a bit longer
1027 * to detect, but it's more efficient this way. */
1028 struct dd_draw_record
*youngest
=
1029 list_last_entry(&records
, struct dd_draw_record
, list
);
1031 if (dscreen
->timeout_ms
> 0) {
1032 uint64_t abs_timeout
= os_time_get_absolute_timeout(
1033 (uint64_t)dscreen
->timeout_ms
* 1000*1000);
1035 if (!util_queue_fence_wait_timeout(&youngest
->driver_finished
, abs_timeout
) ||
1036 !screen
->fence_finish(screen
, NULL
, youngest
->bottom_of_pipe
,
1037 (uint64_t)dscreen
->timeout_ms
* 1000*1000)) {
1038 mtx_lock(&dctx
->mutex
);
1039 list_splice(&records
, &dctx
->records
);
1040 dd_report_hang(dctx
);
1041 /* we won't actually get here */
1042 mtx_unlock(&dctx
->mutex
);
1045 util_queue_fence_wait(&youngest
->driver_finished
);
1048 list_for_each_entry_safe(struct dd_draw_record
, record
, &records
, list
) {
1049 dd_maybe_dump_record(dscreen
, record
);
1050 list_del(&record
->list
);
1051 dd_free_record(screen
, record
);
1054 mtx_lock(&dctx
->mutex
);
1056 mtx_unlock(&dctx
->mutex
);
1060 static struct dd_draw_record
*
1061 dd_create_record(struct dd_context
*dctx
)
1063 struct dd_draw_record
*record
;
1065 record
= MALLOC_STRUCT(dd_draw_record
);
1069 record
->dctx
= dctx
;
1070 record
->draw_call
= dctx
->num_draw_calls
;
1072 record
->prev_bottom_of_pipe
= NULL
;
1073 record
->top_of_pipe
= NULL
;
1074 record
->bottom_of_pipe
= NULL
;
1075 record
->log_page
= NULL
;
1076 util_queue_fence_init(&record
->driver_finished
);
1077 util_queue_fence_reset(&record
->driver_finished
);
1079 dd_init_copy_of_draw_state(&record
->draw_state
);
1080 dd_copy_draw_state(&record
->draw_state
.base
, &dctx
->draw_state
);
1086 dd_context_flush(struct pipe_context
*_pipe
,
1087 struct pipe_fence_handle
**fence
, unsigned flags
)
1089 struct dd_context
*dctx
= dd_context(_pipe
);
1090 struct pipe_context
*pipe
= dctx
->pipe
;
1092 pipe
->flush(pipe
, fence
, flags
);
1096 dd_before_draw(struct dd_context
*dctx
, struct dd_draw_record
*record
)
1098 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1099 struct pipe_context
*pipe
= dctx
->pipe
;
1100 struct pipe_screen
*screen
= dscreen
->screen
;
1102 record
->time_before
= os_time_get_nano();
1104 if (dscreen
->timeout_ms
> 0) {
1105 if (dscreen
->flush_always
&& dctx
->num_draw_calls
>= dscreen
->skip_count
) {
1106 pipe
->flush(pipe
, &record
->prev_bottom_of_pipe
, 0);
1107 screen
->fence_reference(screen
, &record
->top_of_pipe
, record
->prev_bottom_of_pipe
);
1109 pipe
->flush(pipe
, &record
->prev_bottom_of_pipe
,
1110 PIPE_FLUSH_DEFERRED
| PIPE_FLUSH_BOTTOM_OF_PIPE
);
1111 pipe
->flush(pipe
, &record
->top_of_pipe
,
1112 PIPE_FLUSH_DEFERRED
| PIPE_FLUSH_TOP_OF_PIPE
);
1114 } else if (dscreen
->flush_always
&& dctx
->num_draw_calls
>= dscreen
->skip_count
) {
1115 pipe
->flush(pipe
, NULL
, 0);
1118 mtx_lock(&dctx
->mutex
);
1119 if (unlikely(dctx
->num_records
> 10000)) {
1120 dctx
->api_stalled
= true;
1121 /* Since this is only a heuristic to prevent the API thread from getting
1122 * too far ahead, we don't need a loop here. */
1123 cnd_wait(&dctx
->cond
, &dctx
->mutex
);
1124 dctx
->api_stalled
= false;
1127 if (list_empty(&dctx
->records
))
1128 cnd_signal(&dctx
->cond
);
1130 list_addtail(&record
->list
, &dctx
->records
);
1131 dctx
->num_records
++;
1132 mtx_unlock(&dctx
->mutex
);
1136 dd_after_draw_async(void *data
)
1138 struct dd_draw_record
*record
= (struct dd_draw_record
*)data
;
1139 struct dd_context
*dctx
= record
->dctx
;
1140 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1142 record
->log_page
= u_log_new_page(&dctx
->log
);
1143 record
->time_after
= os_time_get_nano();
1145 util_queue_fence_signal(&record
->driver_finished
);
1147 if (dscreen
->dump_mode
== DD_DUMP_APITRACE_CALL
&&
1148 dscreen
->apitrace_dump_call
> dctx
->draw_state
.apitrace_call_number
) {
1149 dd_thread_join(dctx
);
1150 /* No need to continue. */
1156 dd_after_draw(struct dd_context
*dctx
, struct dd_draw_record
*record
)
1158 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1159 struct pipe_context
*pipe
= dctx
->pipe
;
1161 if (dscreen
->timeout_ms
> 0) {
1162 unsigned flush_flags
;
1163 if (dscreen
->flush_always
&& dctx
->num_draw_calls
>= dscreen
->skip_count
)
1166 flush_flags
= PIPE_FLUSH_DEFERRED
| PIPE_FLUSH_BOTTOM_OF_PIPE
;
1167 pipe
->flush(pipe
, &record
->bottom_of_pipe
, flush_flags
);
1170 if (pipe
->callback
) {
1171 pipe
->callback(pipe
, dd_after_draw_async
, record
, true);
1173 dd_after_draw_async(record
);
1176 ++dctx
->num_draw_calls
;
1177 if (dscreen
->skip_count
&& dctx
->num_draw_calls
% 10000 == 0)
1178 fprintf(stderr
, "Gallium debugger reached %u draw calls.\n",
1179 dctx
->num_draw_calls
);
1183 dd_context_draw_vbo(struct pipe_context
*_pipe
,
1184 const struct pipe_draw_info
*info
)
1186 struct dd_context
*dctx
= dd_context(_pipe
);
1187 struct pipe_context
*pipe
= dctx
->pipe
;
1188 struct dd_draw_record
*record
= dd_create_record(dctx
);
1190 record
->call
.type
= CALL_DRAW_VBO
;
1191 record
->call
.info
.draw_vbo
.draw
= *info
;
1192 record
->call
.info
.draw_vbo
.draw
.count_from_stream_output
= NULL
;
1193 pipe_so_target_reference(&record
->call
.info
.draw_vbo
.draw
.count_from_stream_output
,
1194 info
->count_from_stream_output
);
1195 if (info
->index_size
&& !info
->has_user_indices
) {
1196 record
->call
.info
.draw_vbo
.draw
.index
.resource
= NULL
;
1197 pipe_resource_reference(&record
->call
.info
.draw_vbo
.draw
.index
.resource
,
1198 info
->index
.resource
);
1201 if (info
->indirect
) {
1202 record
->call
.info
.draw_vbo
.indirect
= *info
->indirect
;
1203 record
->call
.info
.draw_vbo
.draw
.indirect
= &record
->call
.info
.draw_vbo
.indirect
;
1205 record
->call
.info
.draw_vbo
.indirect
.buffer
= NULL
;
1206 pipe_resource_reference(&record
->call
.info
.draw_vbo
.indirect
.buffer
,
1207 info
->indirect
->buffer
);
1208 record
->call
.info
.draw_vbo
.indirect
.indirect_draw_count
= NULL
;
1209 pipe_resource_reference(&record
->call
.info
.draw_vbo
.indirect
.indirect_draw_count
,
1210 info
->indirect
->indirect_draw_count
);
1212 memset(&record
->call
.info
.draw_vbo
.indirect
, 0, sizeof(*info
->indirect
));
1215 dd_before_draw(dctx
, record
);
1216 pipe
->draw_vbo(pipe
, info
);
1217 dd_after_draw(dctx
, record
);
1221 dd_context_launch_grid(struct pipe_context
*_pipe
,
1222 const struct pipe_grid_info
*info
)
1224 struct dd_context
*dctx
= dd_context(_pipe
);
1225 struct pipe_context
*pipe
= dctx
->pipe
;
1226 struct dd_draw_record
*record
= dd_create_record(dctx
);
1228 record
->call
.type
= CALL_LAUNCH_GRID
;
1229 record
->call
.info
.launch_grid
= *info
;
1230 record
->call
.info
.launch_grid
.indirect
= NULL
;
1231 pipe_resource_reference(&record
->call
.info
.launch_grid
.indirect
, info
->indirect
);
1233 dd_before_draw(dctx
, record
);
1234 pipe
->launch_grid(pipe
, info
);
1235 dd_after_draw(dctx
, record
);
1239 dd_context_resource_copy_region(struct pipe_context
*_pipe
,
1240 struct pipe_resource
*dst
, unsigned dst_level
,
1241 unsigned dstx
, unsigned dsty
, unsigned dstz
,
1242 struct pipe_resource
*src
, unsigned src_level
,
1243 const struct pipe_box
*src_box
)
1245 struct dd_context
*dctx
= dd_context(_pipe
);
1246 struct pipe_context
*pipe
= dctx
->pipe
;
1247 struct dd_draw_record
*record
= dd_create_record(dctx
);
1249 record
->call
.type
= CALL_RESOURCE_COPY_REGION
;
1250 record
->call
.info
.resource_copy_region
.dst
= NULL
;
1251 pipe_resource_reference(&record
->call
.info
.resource_copy_region
.dst
, dst
);
1252 record
->call
.info
.resource_copy_region
.dst_level
= dst_level
;
1253 record
->call
.info
.resource_copy_region
.dstx
= dstx
;
1254 record
->call
.info
.resource_copy_region
.dsty
= dsty
;
1255 record
->call
.info
.resource_copy_region
.dstz
= dstz
;
1256 record
->call
.info
.resource_copy_region
.src
= NULL
;
1257 pipe_resource_reference(&record
->call
.info
.resource_copy_region
.src
, src
);
1258 record
->call
.info
.resource_copy_region
.src_level
= src_level
;
1259 record
->call
.info
.resource_copy_region
.src_box
= *src_box
;
1261 dd_before_draw(dctx
, record
);
1262 pipe
->resource_copy_region(pipe
,
1263 dst
, dst_level
, dstx
, dsty
, dstz
,
1264 src
, src_level
, src_box
);
1265 dd_after_draw(dctx
, record
);
1269 dd_context_blit(struct pipe_context
*_pipe
, const struct pipe_blit_info
*info
)
1271 struct dd_context
*dctx
= dd_context(_pipe
);
1272 struct pipe_context
*pipe
= dctx
->pipe
;
1273 struct dd_draw_record
*record
= dd_create_record(dctx
);
1275 record
->call
.type
= CALL_BLIT
;
1276 record
->call
.info
.blit
= *info
;
1277 record
->call
.info
.blit
.dst
.resource
= NULL
;
1278 pipe_resource_reference(&record
->call
.info
.blit
.dst
.resource
, info
->dst
.resource
);
1279 record
->call
.info
.blit
.src
.resource
= NULL
;
1280 pipe_resource_reference(&record
->call
.info
.blit
.src
.resource
, info
->src
.resource
);
1282 dd_before_draw(dctx
, record
);
1283 pipe
->blit(pipe
, info
);
1284 dd_after_draw(dctx
, record
);
1288 dd_context_generate_mipmap(struct pipe_context
*_pipe
,
1289 struct pipe_resource
*res
,
1290 enum pipe_format format
,
1291 unsigned base_level
,
1292 unsigned last_level
,
1293 unsigned first_layer
,
1294 unsigned last_layer
)
1296 struct dd_context
*dctx
= dd_context(_pipe
);
1297 struct pipe_context
*pipe
= dctx
->pipe
;
1298 struct dd_draw_record
*record
= dd_create_record(dctx
);
1301 record
->call
.type
= CALL_GENERATE_MIPMAP
;
1302 record
->call
.info
.generate_mipmap
.res
= NULL
;
1303 pipe_resource_reference(&record
->call
.info
.generate_mipmap
.res
, res
);
1304 record
->call
.info
.generate_mipmap
.format
= format
;
1305 record
->call
.info
.generate_mipmap
.base_level
= base_level
;
1306 record
->call
.info
.generate_mipmap
.last_level
= last_level
;
1307 record
->call
.info
.generate_mipmap
.first_layer
= first_layer
;
1308 record
->call
.info
.generate_mipmap
.last_layer
= last_layer
;
1310 dd_before_draw(dctx
, record
);
1311 result
= pipe
->generate_mipmap(pipe
, res
, format
, base_level
, last_level
,
1312 first_layer
, last_layer
);
1313 dd_after_draw(dctx
, record
);
1318 dd_context_get_query_result_resource(struct pipe_context
*_pipe
,
1319 struct pipe_query
*query
,
1321 enum pipe_query_value_type result_type
,
1323 struct pipe_resource
*resource
,
1326 struct dd_context
*dctx
= dd_context(_pipe
);
1327 struct dd_query
*dquery
= dd_query(query
);
1328 struct pipe_context
*pipe
= dctx
->pipe
;
1329 struct dd_draw_record
*record
= dd_create_record(dctx
);
1331 record
->call
.type
= CALL_GET_QUERY_RESULT_RESOURCE
;
1332 record
->call
.info
.get_query_result_resource
.query
= query
;
1333 record
->call
.info
.get_query_result_resource
.wait
= wait
;
1334 record
->call
.info
.get_query_result_resource
.result_type
= result_type
;
1335 record
->call
.info
.get_query_result_resource
.index
= index
;
1336 record
->call
.info
.get_query_result_resource
.resource
= NULL
;
1337 pipe_resource_reference(&record
->call
.info
.get_query_result_resource
.resource
,
1339 record
->call
.info
.get_query_result_resource
.offset
= offset
;
1341 /* The query may be deleted by the time we need to print it. */
1342 record
->call
.info
.get_query_result_resource
.query_type
= dquery
->type
;
1344 dd_before_draw(dctx
, record
);
1345 pipe
->get_query_result_resource(pipe
, dquery
->query
, wait
,
1346 result_type
, index
, resource
, offset
);
1347 dd_after_draw(dctx
, record
);
1351 dd_context_flush_resource(struct pipe_context
*_pipe
,
1352 struct pipe_resource
*resource
)
1354 struct dd_context
*dctx
= dd_context(_pipe
);
1355 struct pipe_context
*pipe
= dctx
->pipe
;
1356 struct dd_draw_record
*record
= dd_create_record(dctx
);
1358 record
->call
.type
= CALL_FLUSH_RESOURCE
;
1359 record
->call
.info
.flush_resource
= NULL
;
1360 pipe_resource_reference(&record
->call
.info
.flush_resource
, resource
);
1362 dd_before_draw(dctx
, record
);
1363 pipe
->flush_resource(pipe
, resource
);
1364 dd_after_draw(dctx
, record
);
1368 dd_context_clear(struct pipe_context
*_pipe
, unsigned buffers
,
1369 const union pipe_color_union
*color
, double depth
,
1372 struct dd_context
*dctx
= dd_context(_pipe
);
1373 struct pipe_context
*pipe
= dctx
->pipe
;
1374 struct dd_draw_record
*record
= dd_create_record(dctx
);
1376 record
->call
.type
= CALL_CLEAR
;
1377 record
->call
.info
.clear
.buffers
= buffers
;
1378 record
->call
.info
.clear
.color
= *color
;
1379 record
->call
.info
.clear
.depth
= depth
;
1380 record
->call
.info
.clear
.stencil
= stencil
;
1382 dd_before_draw(dctx
, record
);
1383 pipe
->clear(pipe
, buffers
, color
, depth
, stencil
);
1384 dd_after_draw(dctx
, record
);
1388 dd_context_clear_render_target(struct pipe_context
*_pipe
,
1389 struct pipe_surface
*dst
,
1390 const union pipe_color_union
*color
,
1391 unsigned dstx
, unsigned dsty
,
1392 unsigned width
, unsigned height
,
1393 bool render_condition_enabled
)
1395 struct dd_context
*dctx
= dd_context(_pipe
);
1396 struct pipe_context
*pipe
= dctx
->pipe
;
1397 struct dd_draw_record
*record
= dd_create_record(dctx
);
1399 record
->call
.type
= CALL_CLEAR_RENDER_TARGET
;
1401 dd_before_draw(dctx
, record
);
1402 pipe
->clear_render_target(pipe
, dst
, color
, dstx
, dsty
, width
, height
,
1403 render_condition_enabled
);
1404 dd_after_draw(dctx
, record
);
1408 dd_context_clear_depth_stencil(struct pipe_context
*_pipe
,
1409 struct pipe_surface
*dst
, unsigned clear_flags
,
1410 double depth
, unsigned stencil
, unsigned dstx
,
1411 unsigned dsty
, unsigned width
, unsigned height
,
1412 bool render_condition_enabled
)
1414 struct dd_context
*dctx
= dd_context(_pipe
);
1415 struct pipe_context
*pipe
= dctx
->pipe
;
1416 struct dd_draw_record
*record
= dd_create_record(dctx
);
1418 record
->call
.type
= CALL_CLEAR_DEPTH_STENCIL
;
1420 dd_before_draw(dctx
, record
);
1421 pipe
->clear_depth_stencil(pipe
, dst
, clear_flags
, depth
, stencil
,
1422 dstx
, dsty
, width
, height
,
1423 render_condition_enabled
);
1424 dd_after_draw(dctx
, record
);
1428 dd_context_clear_buffer(struct pipe_context
*_pipe
, struct pipe_resource
*res
,
1429 unsigned offset
, unsigned size
,
1430 const void *clear_value
, int clear_value_size
)
1432 struct dd_context
*dctx
= dd_context(_pipe
);
1433 struct pipe_context
*pipe
= dctx
->pipe
;
1434 struct dd_draw_record
*record
= dd_create_record(dctx
);
1436 record
->call
.type
= CALL_CLEAR_BUFFER
;
1437 record
->call
.info
.clear_buffer
.res
= NULL
;
1438 pipe_resource_reference(&record
->call
.info
.clear_buffer
.res
, res
);
1439 record
->call
.info
.clear_buffer
.offset
= offset
;
1440 record
->call
.info
.clear_buffer
.size
= size
;
1441 record
->call
.info
.clear_buffer
.clear_value
= clear_value
;
1442 record
->call
.info
.clear_buffer
.clear_value_size
= clear_value_size
;
1444 dd_before_draw(dctx
, record
);
1445 pipe
->clear_buffer(pipe
, res
, offset
, size
, clear_value
, clear_value_size
);
1446 dd_after_draw(dctx
, record
);
1450 dd_context_clear_texture(struct pipe_context
*_pipe
,
1451 struct pipe_resource
*res
,
1453 const struct pipe_box
*box
,
1456 struct dd_context
*dctx
= dd_context(_pipe
);
1457 struct pipe_context
*pipe
= dctx
->pipe
;
1458 struct dd_draw_record
*record
= dd_create_record(dctx
);
1460 record
->call
.type
= CALL_CLEAR_TEXTURE
;
1462 dd_before_draw(dctx
, record
);
1463 pipe
->clear_texture(pipe
, res
, level
, box
, data
);
1464 dd_after_draw(dctx
, record
);
/********************************************************************
 * transfer
 ********************************************************************/
1472 dd_context_transfer_map(struct pipe_context
*_pipe
,
1473 struct pipe_resource
*resource
, unsigned level
,
1474 unsigned usage
, const struct pipe_box
*box
,
1475 struct pipe_transfer
**transfer
)
1477 struct dd_context
*dctx
= dd_context(_pipe
);
1478 struct pipe_context
*pipe
= dctx
->pipe
;
1479 struct dd_draw_record
*record
=
1480 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1483 record
->call
.type
= CALL_TRANSFER_MAP
;
1485 dd_before_draw(dctx
, record
);
1487 void *ptr
= pipe
->transfer_map(pipe
, resource
, level
, usage
, box
, transfer
);
1489 record
->call
.info
.transfer_map
.transfer_ptr
= *transfer
;
1490 record
->call
.info
.transfer_map
.ptr
= ptr
;
1492 record
->call
.info
.transfer_map
.transfer
= **transfer
;
1493 record
->call
.info
.transfer_map
.transfer
.resource
= NULL
;
1494 pipe_resource_reference(&record
->call
.info
.transfer_map
.transfer
.resource
,
1495 (*transfer
)->resource
);
1497 memset(&record
->call
.info
.transfer_map
.transfer
, 0, sizeof(struct pipe_transfer
));
1500 dd_after_draw(dctx
, record
);
1506 dd_context_transfer_flush_region(struct pipe_context
*_pipe
,
1507 struct pipe_transfer
*transfer
,
1508 const struct pipe_box
*box
)
1510 struct dd_context
*dctx
= dd_context(_pipe
);
1511 struct pipe_context
*pipe
= dctx
->pipe
;
1512 struct dd_draw_record
*record
=
1513 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1516 record
->call
.type
= CALL_TRANSFER_FLUSH_REGION
;
1517 record
->call
.info
.transfer_flush_region
.transfer_ptr
= transfer
;
1518 record
->call
.info
.transfer_flush_region
.box
= *box
;
1519 record
->call
.info
.transfer_flush_region
.transfer
= *transfer
;
1520 record
->call
.info
.transfer_flush_region
.transfer
.resource
= NULL
;
1521 pipe_resource_reference(
1522 &record
->call
.info
.transfer_flush_region
.transfer
.resource
,
1523 transfer
->resource
);
1525 dd_before_draw(dctx
, record
);
1527 pipe
->transfer_flush_region(pipe
, transfer
, box
);
1529 dd_after_draw(dctx
, record
);
1533 dd_context_transfer_unmap(struct pipe_context
*_pipe
,
1534 struct pipe_transfer
*transfer
)
1536 struct dd_context
*dctx
= dd_context(_pipe
);
1537 struct pipe_context
*pipe
= dctx
->pipe
;
1538 struct dd_draw_record
*record
=
1539 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1542 record
->call
.type
= CALL_TRANSFER_UNMAP
;
1543 record
->call
.info
.transfer_unmap
.transfer_ptr
= transfer
;
1544 record
->call
.info
.transfer_unmap
.transfer
= *transfer
;
1545 record
->call
.info
.transfer_unmap
.transfer
.resource
= NULL
;
1546 pipe_resource_reference(
1547 &record
->call
.info
.transfer_unmap
.transfer
.resource
,
1548 transfer
->resource
);
1550 dd_before_draw(dctx
, record
);
1552 pipe
->transfer_unmap(pipe
, transfer
);
1554 dd_after_draw(dctx
, record
);
1558 dd_context_buffer_subdata(struct pipe_context
*_pipe
,
1559 struct pipe_resource
*resource
,
1560 unsigned usage
, unsigned offset
,
1561 unsigned size
, const void *data
)
1563 struct dd_context
*dctx
= dd_context(_pipe
);
1564 struct pipe_context
*pipe
= dctx
->pipe
;
1565 struct dd_draw_record
*record
=
1566 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1569 record
->call
.type
= CALL_BUFFER_SUBDATA
;
1570 record
->call
.info
.buffer_subdata
.resource
= NULL
;
1571 pipe_resource_reference(&record
->call
.info
.buffer_subdata
.resource
, resource
);
1572 record
->call
.info
.buffer_subdata
.usage
= usage
;
1573 record
->call
.info
.buffer_subdata
.offset
= offset
;
1574 record
->call
.info
.buffer_subdata
.size
= size
;
1575 record
->call
.info
.buffer_subdata
.data
= data
;
1577 dd_before_draw(dctx
, record
);
1579 pipe
->buffer_subdata(pipe
, resource
, usage
, offset
, size
, data
);
1581 dd_after_draw(dctx
, record
);
1585 dd_context_texture_subdata(struct pipe_context
*_pipe
,
1586 struct pipe_resource
*resource
,
1587 unsigned level
, unsigned usage
,
1588 const struct pipe_box
*box
,
1589 const void *data
, unsigned stride
,
1590 unsigned layer_stride
)
1592 struct dd_context
*dctx
= dd_context(_pipe
);
1593 struct pipe_context
*pipe
= dctx
->pipe
;
1594 struct dd_draw_record
*record
=
1595 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1598 record
->call
.type
= CALL_TEXTURE_SUBDATA
;
1599 record
->call
.info
.texture_subdata
.resource
= NULL
;
1600 pipe_resource_reference(&record
->call
.info
.texture_subdata
.resource
, resource
);
1601 record
->call
.info
.texture_subdata
.level
= level
;
1602 record
->call
.info
.texture_subdata
.usage
= usage
;
1603 record
->call
.info
.texture_subdata
.box
= *box
;
1604 record
->call
.info
.texture_subdata
.data
= data
;
1605 record
->call
.info
.texture_subdata
.stride
= stride
;
1606 record
->call
.info
.texture_subdata
.layer_stride
= layer_stride
;
1608 dd_before_draw(dctx
, record
);
1610 pipe
->texture_subdata(pipe
, resource
, level
, usage
, box
, data
,
1611 stride
, layer_stride
);
1613 dd_after_draw(dctx
, record
);
1617 dd_init_draw_functions(struct dd_context
*dctx
)
1621 CTX_INIT(launch_grid
);
1622 CTX_INIT(resource_copy_region
);
1625 CTX_INIT(clear_render_target
);
1626 CTX_INIT(clear_depth_stencil
);
1627 CTX_INIT(clear_buffer
);
1628 CTX_INIT(clear_texture
);
1629 CTX_INIT(flush_resource
);
1630 CTX_INIT(generate_mipmap
);
1631 CTX_INIT(get_query_result_resource
);
1632 CTX_INIT(transfer_map
);
1633 CTX_INIT(transfer_flush_region
);
1634 CTX_INIT(transfer_unmap
);
1635 CTX_INIT(buffer_subdata
);
1636 CTX_INIT(texture_subdata
);