1 /**************************************************************************
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "util/os_time.h"
43 dd_write_header(FILE *f
, struct pipe_screen
*screen
, unsigned apitrace_call_number
)
46 if (os_get_command_line(cmd_line
, sizeof(cmd_line
)))
47 fprintf(f
, "Command: %s\n", cmd_line
);
48 fprintf(f
, "Driver vendor: %s\n", screen
->get_vendor(screen
));
49 fprintf(f
, "Device vendor: %s\n", screen
->get_device_vendor(screen
));
50 fprintf(f
, "Device name: %s\n\n", screen
->get_name(screen
));
52 if (apitrace_call_number
)
53 fprintf(f
, "Last apitrace call: %u\n\n", apitrace_call_number
);
57 dd_get_file_stream(struct dd_screen
*dscreen
, unsigned apitrace_call_number
)
59 struct pipe_screen
*screen
= dscreen
->screen
;
61 FILE *f
= dd_get_debug_file(dscreen
->verbose
);
65 dd_write_header(f
, screen
, apitrace_call_number
);
/**
 * Append the last 60 lines of the kernel log to the dump file, which often
 * contains the GPU-reset reason. Best effort: silently does nothing when
 * dmesg cannot be spawned (restored NULL check, copy loop body and pclose).
 */
static void
dd_dump_dmesg(FILE *f)
{
   char line[2000];
   FILE *p = popen("dmesg | tail -n60", "r");

   if (!p)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(line, sizeof(line), p))
      fputs(line, f);

   pclose(p);
}
86 dd_num_active_viewports(struct dd_draw_state
*dstate
)
88 struct tgsi_shader_info info
;
89 const struct tgsi_token
*tokens
;
91 if (dstate
->shaders
[PIPE_SHADER_GEOMETRY
])
92 tokens
= dstate
->shaders
[PIPE_SHADER_GEOMETRY
]->state
.shader
.tokens
;
93 else if (dstate
->shaders
[PIPE_SHADER_TESS_EVAL
])
94 tokens
= dstate
->shaders
[PIPE_SHADER_TESS_EVAL
]->state
.shader
.tokens
;
95 else if (dstate
->shaders
[PIPE_SHADER_VERTEX
])
96 tokens
= dstate
->shaders
[PIPE_SHADER_VERTEX
]->state
.shader
.tokens
;
101 tgsi_scan_shader(tokens
, &info
);
102 if (info
.writes_viewport_index
)
103 return PIPE_MAX_VIEWPORTS
;
/* ANSI terminal color escapes used to highlight the state dumps. */
#define COLOR_RESET "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE "\033[1;33m"

/* Dump a named state object via the matching util_dump_* printer.
 * All of these macros expect a local `FILE *f` in scope and terminate the
 * output line themselves. (The `} while(0)` terminators were restored from
 * the corrupted source.) */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Like DUMP, but with an index for array-like state slots. */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a structure member by value. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a structure member by address (for printers that take pointers). */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Print "name = value" using the state color. */
#define PRINT_NAMED(type, name, value) \
   do { \
      fprintf(f, COLOR_STATE "%s" COLOR_RESET " = ", name); \
      util_dump_##type(f, value); \
      fprintf(f, "\n"); \
   } while (0)
/* Scalar printers used by the DUMP_* macros. The uint/int bodies were lost
 * in the corrupted source and restored as the obvious fprintf calls. */

/** Print an unsigned integer in decimal. */
static void
util_dump_uint(FILE *f, unsigned i)
{
   fprintf(f, "%u", i);
}

/** Print a signed integer in decimal. */
static void
util_dump_int(FILE *f, int i)
{
   fprintf(f, "%i", i);
}

/** Print an unsigned integer as 0x-prefixed hexadecimal. */
static void
util_dump_hex(FILE *f, unsigned i)
{
   fprintf(f, "0x%x", i);
}
/** Print a double with the default %f precision (6 decimal places).
 *  Body restored from the corrupted source. */
static void
util_dump_double(FILE *f, double d)
{
   fprintf(f, "%f", d);
}
169 util_dump_format(FILE *f
, enum pipe_format format
)
171 fprintf(f
, "%s", util_format_name(format
));
175 util_dump_color_union(FILE *f
, const union pipe_color_union
*color
)
177 fprintf(f
, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
178 color
->f
[0], color
->f
[1], color
->f
[2], color
->f
[3],
179 color
->ui
[0], color
->ui
[1], color
->ui
[2], color
->ui
[3]);
183 dd_dump_render_condition(struct dd_draw_state
*dstate
, FILE *f
)
185 if (dstate
->render_cond
.query
) {
186 fprintf(f
, "render condition:\n");
187 DUMP_M(query_type
, &dstate
->render_cond
, query
->type
);
188 DUMP_M(uint
, &dstate
->render_cond
, condition
);
189 DUMP_M(uint
, &dstate
->render_cond
, mode
);
195 dd_dump_shader(struct dd_draw_state
*dstate
, enum pipe_shader_type sh
, FILE *f
)
198 const char *shader_str
[PIPE_SHADER_TYPES
];
200 shader_str
[PIPE_SHADER_VERTEX
] = "VERTEX";
201 shader_str
[PIPE_SHADER_TESS_CTRL
] = "TESS_CTRL";
202 shader_str
[PIPE_SHADER_TESS_EVAL
] = "TESS_EVAL";
203 shader_str
[PIPE_SHADER_GEOMETRY
] = "GEOMETRY";
204 shader_str
[PIPE_SHADER_FRAGMENT
] = "FRAGMENT";
205 shader_str
[PIPE_SHADER_COMPUTE
] = "COMPUTE";
207 if (sh
== PIPE_SHADER_TESS_CTRL
&&
208 !dstate
->shaders
[PIPE_SHADER_TESS_CTRL
] &&
209 dstate
->shaders
[PIPE_SHADER_TESS_EVAL
])
210 fprintf(f
, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
211 "default_inner_level = {%f, %f}}\n",
212 dstate
->tess_default_levels
[0],
213 dstate
->tess_default_levels
[1],
214 dstate
->tess_default_levels
[2],
215 dstate
->tess_default_levels
[3],
216 dstate
->tess_default_levels
[4],
217 dstate
->tess_default_levels
[5]);
219 if (sh
== PIPE_SHADER_FRAGMENT
)
221 unsigned num_viewports
= dd_num_active_viewports(dstate
);
223 if (dstate
->rs
->state
.rs
.clip_plane_enable
)
224 DUMP(clip_state
, &dstate
->clip_state
);
226 for (i
= 0; i
< num_viewports
; i
++)
227 DUMP_I(viewport_state
, &dstate
->viewports
[i
], i
);
229 if (dstate
->rs
->state
.rs
.scissor
)
230 for (i
= 0; i
< num_viewports
; i
++)
231 DUMP_I(scissor_state
, &dstate
->scissors
[i
], i
);
233 DUMP(rasterizer_state
, &dstate
->rs
->state
.rs
);
235 if (dstate
->rs
->state
.rs
.poly_stipple_enable
)
236 DUMP(poly_stipple
, &dstate
->polygon_stipple
);
240 if (!dstate
->shaders
[sh
])
243 fprintf(f
, COLOR_SHADER
"begin shader: %s" COLOR_RESET
"\n", shader_str
[sh
]);
244 DUMP(shader_state
, &dstate
->shaders
[sh
]->state
.shader
);
246 for (i
= 0; i
< PIPE_MAX_CONSTANT_BUFFERS
; i
++)
247 if (dstate
->constant_buffers
[sh
][i
].buffer
||
248 dstate
->constant_buffers
[sh
][i
].user_buffer
) {
249 DUMP_I(constant_buffer
, &dstate
->constant_buffers
[sh
][i
], i
);
250 if (dstate
->constant_buffers
[sh
][i
].buffer
)
251 DUMP_M(resource
, &dstate
->constant_buffers
[sh
][i
], buffer
);
254 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++)
255 if (dstate
->sampler_states
[sh
][i
])
256 DUMP_I(sampler_state
, &dstate
->sampler_states
[sh
][i
]->state
.sampler
, i
);
258 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++)
259 if (dstate
->sampler_views
[sh
][i
]) {
260 DUMP_I(sampler_view
, dstate
->sampler_views
[sh
][i
], i
);
261 DUMP_M(resource
, dstate
->sampler_views
[sh
][i
], texture
);
264 for (i
= 0; i
< PIPE_MAX_SHADER_IMAGES
; i
++)
265 if (dstate
->shader_images
[sh
][i
].resource
) {
266 DUMP_I(image_view
, &dstate
->shader_images
[sh
][i
], i
);
267 if (dstate
->shader_images
[sh
][i
].resource
)
268 DUMP_M(resource
, &dstate
->shader_images
[sh
][i
], resource
);
271 for (i
= 0; i
< PIPE_MAX_SHADER_BUFFERS
; i
++)
272 if (dstate
->shader_buffers
[sh
][i
].buffer
) {
273 DUMP_I(shader_buffer
, &dstate
->shader_buffers
[sh
][i
], i
);
274 if (dstate
->shader_buffers
[sh
][i
].buffer
)
275 DUMP_M(resource
, &dstate
->shader_buffers
[sh
][i
], buffer
);
278 fprintf(f
, COLOR_SHADER
"end shader: %s" COLOR_RESET
"\n\n", shader_str
[sh
]);
282 dd_dump_draw_vbo(struct dd_draw_state
*dstate
, struct pipe_draw_info
*info
, FILE *f
)
286 DUMP(draw_info
, info
);
287 if (info
->count_from_stream_output
)
288 DUMP_M(stream_output_target
, info
,
289 count_from_stream_output
);
290 if (info
->indirect
) {
291 DUMP_M(resource
, info
, indirect
->buffer
);
292 if (info
->indirect
->indirect_draw_count
)
293 DUMP_M(resource
, info
, indirect
->indirect_draw_count
);
298 /* TODO: dump active queries */
300 dd_dump_render_condition(dstate
, f
);
302 for (i
= 0; i
< PIPE_MAX_ATTRIBS
; i
++)
303 if (dstate
->vertex_buffers
[i
].buffer
.resource
) {
304 DUMP_I(vertex_buffer
, &dstate
->vertex_buffers
[i
], i
);
305 if (!dstate
->vertex_buffers
[i
].is_user_buffer
)
306 DUMP_M(resource
, &dstate
->vertex_buffers
[i
], buffer
.resource
);
309 if (dstate
->velems
) {
310 PRINT_NAMED(uint
, "num vertex elements",
311 dstate
->velems
->state
.velems
.count
);
312 for (i
= 0; i
< dstate
->velems
->state
.velems
.count
; i
++) {
314 DUMP_I(vertex_element
, &dstate
->velems
->state
.velems
.velems
[i
], i
);
318 PRINT_NAMED(uint
, "num stream output targets", dstate
->num_so_targets
);
319 for (i
= 0; i
< dstate
->num_so_targets
; i
++)
320 if (dstate
->so_targets
[i
]) {
321 DUMP_I(stream_output_target
, dstate
->so_targets
[i
], i
);
322 DUMP_M(resource
, dstate
->so_targets
[i
], buffer
);
323 fprintf(f
, " offset = %i\n", dstate
->so_offsets
[i
]);
327 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
328 if (sh
== PIPE_SHADER_COMPUTE
)
331 dd_dump_shader(dstate
, sh
, f
);
335 DUMP(depth_stencil_alpha_state
, &dstate
->dsa
->state
.dsa
);
336 DUMP(stencil_ref
, &dstate
->stencil_ref
);
339 DUMP(blend_state
, &dstate
->blend
->state
.blend
);
340 DUMP(blend_color
, &dstate
->blend_color
);
342 PRINT_NAMED(uint
, "min_samples", dstate
->min_samples
);
343 PRINT_NAMED(hex
, "sample_mask", dstate
->sample_mask
);
346 DUMP(framebuffer_state
, &dstate
->framebuffer_state
);
347 for (i
= 0; i
< dstate
->framebuffer_state
.nr_cbufs
; i
++)
348 if (dstate
->framebuffer_state
.cbufs
[i
]) {
349 fprintf(f
, " " COLOR_STATE
"cbufs[%i]:" COLOR_RESET
"\n ", i
);
350 DUMP(surface
, dstate
->framebuffer_state
.cbufs
[i
]);
352 DUMP(resource
, dstate
->framebuffer_state
.cbufs
[i
]->texture
);
354 if (dstate
->framebuffer_state
.zsbuf
) {
355 fprintf(f
, " " COLOR_STATE
"zsbuf:" COLOR_RESET
"\n ");
356 DUMP(surface
, dstate
->framebuffer_state
.zsbuf
);
358 DUMP(resource
, dstate
->framebuffer_state
.zsbuf
->texture
);
364 dd_dump_launch_grid(struct dd_draw_state
*dstate
, struct pipe_grid_info
*info
, FILE *f
)
366 fprintf(f
, "%s:\n", __func__
+8);
367 DUMP(grid_info
, info
);
370 dd_dump_shader(dstate
, PIPE_SHADER_COMPUTE
, f
);
375 dd_dump_resource_copy_region(struct dd_draw_state
*dstate
,
376 struct call_resource_copy_region
*info
,
379 fprintf(f
, "%s:\n", __func__
+8);
380 DUMP_M(resource
, info
, dst
);
381 DUMP_M(uint
, info
, dst_level
);
382 DUMP_M(uint
, info
, dstx
);
383 DUMP_M(uint
, info
, dsty
);
384 DUMP_M(uint
, info
, dstz
);
385 DUMP_M(resource
, info
, src
);
386 DUMP_M(uint
, info
, src_level
);
387 DUMP_M_ADDR(box
, info
, src_box
);
391 dd_dump_blit(struct dd_draw_state
*dstate
, struct pipe_blit_info
*info
, FILE *f
)
393 fprintf(f
, "%s:\n", __func__
+8);
394 DUMP_M(resource
, info
, dst
.resource
);
395 DUMP_M(uint
, info
, dst
.level
);
396 DUMP_M_ADDR(box
, info
, dst
.box
);
397 DUMP_M(format
, info
, dst
.format
);
399 DUMP_M(resource
, info
, src
.resource
);
400 DUMP_M(uint
, info
, src
.level
);
401 DUMP_M_ADDR(box
, info
, src
.box
);
402 DUMP_M(format
, info
, src
.format
);
404 DUMP_M(hex
, info
, mask
);
405 DUMP_M(uint
, info
, filter
);
406 DUMP_M(uint
, info
, scissor_enable
);
407 DUMP_M_ADDR(scissor_state
, info
, scissor
);
408 DUMP_M(uint
, info
, render_condition_enable
);
410 if (info
->render_condition_enable
)
411 dd_dump_render_condition(dstate
, f
);
/** Dump a generate_mipmap call. Parameter dumping is not implemented yet. */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
422 dd_dump_get_query_result_resource(struct call_get_query_result_resource
*info
, FILE *f
)
424 fprintf(f
, "%s:\n", __func__
+ 8);
425 DUMP_M(query_type
, info
, query_type
);
426 DUMP_M(uint
, info
, wait
);
427 DUMP_M(query_value_type
, info
, result_type
);
428 DUMP_M(int, info
, index
);
429 DUMP_M(resource
, info
, resource
);
430 DUMP_M(uint
, info
, offset
);
434 dd_dump_flush_resource(struct dd_draw_state
*dstate
, struct pipe_resource
*res
,
437 fprintf(f
, "%s:\n", __func__
+8);
442 dd_dump_clear(struct dd_draw_state
*dstate
, struct call_clear
*info
, FILE *f
)
444 fprintf(f
, "%s:\n", __func__
+8);
445 DUMP_M(uint
, info
, buffers
);
446 DUMP_M_ADDR(color_union
, info
, color
);
447 DUMP_M(double, info
, depth
);
448 DUMP_M(hex
, info
, stencil
);
452 dd_dump_clear_buffer(struct dd_draw_state
*dstate
, struct call_clear_buffer
*info
,
456 const char *value
= (const char*)info
->clear_value
;
458 fprintf(f
, "%s:\n", __func__
+8);
459 DUMP_M(resource
, info
, res
);
460 DUMP_M(uint
, info
, offset
);
461 DUMP_M(uint
, info
, size
);
462 DUMP_M(uint
, info
, clear_value_size
);
464 fprintf(f
, " clear_value:");
465 for (i
= 0; i
< info
->clear_value_size
; i
++)
466 fprintf(f
, " %02x", value
[i
]);
471 dd_dump_transfer_map(struct call_transfer_map
*info
, FILE *f
)
473 fprintf(f
, "%s:\n", __func__
+8);
474 DUMP_M_ADDR(transfer
, info
, transfer
);
475 DUMP_M(ptr
, info
, transfer_ptr
);
476 DUMP_M(ptr
, info
, ptr
);
480 dd_dump_transfer_flush_region(struct call_transfer_flush_region
*info
, FILE *f
)
482 fprintf(f
, "%s:\n", __func__
+8);
483 DUMP_M_ADDR(transfer
, info
, transfer
);
484 DUMP_M(ptr
, info
, transfer_ptr
);
485 DUMP_M_ADDR(box
, info
, box
);
489 dd_dump_transfer_unmap(struct call_transfer_unmap
*info
, FILE *f
)
491 fprintf(f
, "%s:\n", __func__
+8);
492 DUMP_M_ADDR(transfer
, info
, transfer
);
493 DUMP_M(ptr
, info
, transfer_ptr
);
497 dd_dump_buffer_subdata(struct call_buffer_subdata
*info
, FILE *f
)
499 fprintf(f
, "%s:\n", __func__
+8);
500 DUMP_M(resource
, info
, resource
);
501 DUMP_M(transfer_usage
, info
, usage
);
502 DUMP_M(uint
, info
, offset
);
503 DUMP_M(uint
, info
, size
);
504 DUMP_M(ptr
, info
, data
);
508 dd_dump_texture_subdata(struct call_texture_subdata
*info
, FILE *f
)
510 fprintf(f
, "%s:\n", __func__
+8);
511 DUMP_M(resource
, info
, resource
);
512 DUMP_M(uint
, info
, level
);
513 DUMP_M(transfer_usage
, info
, usage
);
514 DUMP_M_ADDR(box
, info
, box
);
515 DUMP_M(ptr
, info
, data
);
516 DUMP_M(uint
, info
, stride
);
517 DUMP_M(uint
, info
, layer_stride
);
/* The three clear variants below only print their record title so far;
 * parameter dumping is not implemented yet. */

/** Dump a clear_texture call. */
static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}

/** Dump a clear_render_target call. */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}

/** Dump a clear_depth_stencil call. */
static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
542 dd_dump_driver_state(struct dd_context
*dctx
, FILE *f
, unsigned flags
)
544 if (dctx
->pipe
->dump_debug_state
) {
545 fprintf(f
,"\n\n**************************************************"
546 "***************************\n");
547 fprintf(f
, "Driver-specific state:\n\n");
548 dctx
->pipe
->dump_debug_state(dctx
->pipe
, f
, flags
);
553 dd_dump_call(FILE *f
, struct dd_draw_state
*state
, struct dd_call
*call
)
555 switch (call
->type
) {
557 dd_dump_draw_vbo(state
, &call
->info
.draw_vbo
.draw
, f
);
559 case CALL_LAUNCH_GRID
:
560 dd_dump_launch_grid(state
, &call
->info
.launch_grid
, f
);
562 case CALL_RESOURCE_COPY_REGION
:
563 dd_dump_resource_copy_region(state
,
564 &call
->info
.resource_copy_region
, f
);
567 dd_dump_blit(state
, &call
->info
.blit
, f
);
569 case CALL_FLUSH_RESOURCE
:
570 dd_dump_flush_resource(state
, call
->info
.flush_resource
, f
);
573 dd_dump_clear(state
, &call
->info
.clear
, f
);
575 case CALL_CLEAR_BUFFER
:
576 dd_dump_clear_buffer(state
, &call
->info
.clear_buffer
, f
);
578 case CALL_CLEAR_TEXTURE
:
579 dd_dump_clear_texture(state
, f
);
581 case CALL_CLEAR_RENDER_TARGET
:
582 dd_dump_clear_render_target(state
, f
);
584 case CALL_CLEAR_DEPTH_STENCIL
:
585 dd_dump_clear_depth_stencil(state
, f
);
587 case CALL_GENERATE_MIPMAP
:
588 dd_dump_generate_mipmap(state
, f
);
590 case CALL_GET_QUERY_RESULT_RESOURCE
:
591 dd_dump_get_query_result_resource(&call
->info
.get_query_result_resource
, f
);
593 case CALL_TRANSFER_MAP
:
594 dd_dump_transfer_map(&call
->info
.transfer_map
, f
);
596 case CALL_TRANSFER_FLUSH_REGION
:
597 dd_dump_transfer_flush_region(&call
->info
.transfer_flush_region
, f
);
599 case CALL_TRANSFER_UNMAP
:
600 dd_dump_transfer_unmap(&call
->info
.transfer_unmap
, f
);
602 case CALL_BUFFER_SUBDATA
:
603 dd_dump_buffer_subdata(&call
->info
.buffer_subdata
, f
);
605 case CALL_TEXTURE_SUBDATA
:
606 dd_dump_texture_subdata(&call
->info
.texture_subdata
, f
);
/**
 * Abort the process after a hang has been reported; never returns.
 * NOTE(review): the tail of this function was lost in the corrupted source;
 * flushing stderr before abort() is the minimal reconstruction — confirm
 * against upstream whether additional flushing/sync was performed.
 */
static void
dd_kill_process(void)
{
   fprintf(stderr, "dd: Aborting the process...\n");
   fflush(stderr);
   abort();
}
622 dd_unreference_copy_of_call(struct dd_call
*dst
)
626 pipe_so_target_reference(&dst
->info
.draw_vbo
.draw
.count_from_stream_output
, NULL
);
627 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
.buffer
, NULL
);
628 pipe_resource_reference(&dst
->info
.draw_vbo
.indirect
.indirect_draw_count
, NULL
);
629 if (dst
->info
.draw_vbo
.draw
.index_size
&&
630 !dst
->info
.draw_vbo
.draw
.has_user_indices
)
631 pipe_resource_reference(&dst
->info
.draw_vbo
.draw
.index
.resource
, NULL
);
633 dst
->info
.draw_vbo
.draw
.index
.user
= NULL
;
635 case CALL_LAUNCH_GRID
:
636 pipe_resource_reference(&dst
->info
.launch_grid
.indirect
, NULL
);
638 case CALL_RESOURCE_COPY_REGION
:
639 pipe_resource_reference(&dst
->info
.resource_copy_region
.dst
, NULL
);
640 pipe_resource_reference(&dst
->info
.resource_copy_region
.src
, NULL
);
643 pipe_resource_reference(&dst
->info
.blit
.dst
.resource
, NULL
);
644 pipe_resource_reference(&dst
->info
.blit
.src
.resource
, NULL
);
646 case CALL_FLUSH_RESOURCE
:
647 pipe_resource_reference(&dst
->info
.flush_resource
, NULL
);
651 case CALL_CLEAR_BUFFER
:
652 pipe_resource_reference(&dst
->info
.clear_buffer
.res
, NULL
);
654 case CALL_CLEAR_TEXTURE
:
656 case CALL_CLEAR_RENDER_TARGET
:
658 case CALL_CLEAR_DEPTH_STENCIL
:
660 case CALL_GENERATE_MIPMAP
:
661 pipe_resource_reference(&dst
->info
.generate_mipmap
.res
, NULL
);
663 case CALL_GET_QUERY_RESULT_RESOURCE
:
664 pipe_resource_reference(&dst
->info
.get_query_result_resource
.resource
, NULL
);
666 case CALL_TRANSFER_MAP
:
667 pipe_resource_reference(&dst
->info
.transfer_map
.transfer
.resource
, NULL
);
669 case CALL_TRANSFER_FLUSH_REGION
:
670 pipe_resource_reference(&dst
->info
.transfer_flush_region
.transfer
.resource
, NULL
);
672 case CALL_TRANSFER_UNMAP
:
673 pipe_resource_reference(&dst
->info
.transfer_unmap
.transfer
.resource
, NULL
);
675 case CALL_BUFFER_SUBDATA
:
676 pipe_resource_reference(&dst
->info
.buffer_subdata
.resource
, NULL
);
678 case CALL_TEXTURE_SUBDATA
:
679 pipe_resource_reference(&dst
->info
.texture_subdata
.resource
, NULL
);
685 dd_init_copy_of_draw_state(struct dd_draw_state_copy
*state
)
689 /* Just clear pointers to gallium objects. Don't clear the whole structure,
690 * because it would kill performance with its size of 130 KB.
692 memset(state
->base
.vertex_buffers
, 0,
693 sizeof(state
->base
.vertex_buffers
));
694 memset(state
->base
.so_targets
, 0,
695 sizeof(state
->base
.so_targets
));
696 memset(state
->base
.constant_buffers
, 0,
697 sizeof(state
->base
.constant_buffers
));
698 memset(state
->base
.sampler_views
, 0,
699 sizeof(state
->base
.sampler_views
));
700 memset(state
->base
.shader_images
, 0,
701 sizeof(state
->base
.shader_images
));
702 memset(state
->base
.shader_buffers
, 0,
703 sizeof(state
->base
.shader_buffers
));
704 memset(&state
->base
.framebuffer_state
, 0,
705 sizeof(state
->base
.framebuffer_state
));
707 memset(state
->shaders
, 0, sizeof(state
->shaders
));
709 state
->base
.render_cond
.query
= &state
->render_cond
;
711 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
712 state
->base
.shaders
[i
] = &state
->shaders
[i
];
713 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++)
714 state
->base
.sampler_states
[i
][j
] = &state
->sampler_states
[i
][j
];
717 state
->base
.velems
= &state
->velems
;
718 state
->base
.rs
= &state
->rs
;
719 state
->base
.dsa
= &state
->dsa
;
720 state
->base
.blend
= &state
->blend
;
724 dd_unreference_copy_of_draw_state(struct dd_draw_state_copy
*state
)
726 struct dd_draw_state
*dst
= &state
->base
;
729 for (i
= 0; i
< ARRAY_SIZE(dst
->vertex_buffers
); i
++)
730 pipe_vertex_buffer_unreference(&dst
->vertex_buffers
[i
]);
731 for (i
= 0; i
< ARRAY_SIZE(dst
->so_targets
); i
++)
732 pipe_so_target_reference(&dst
->so_targets
[i
], NULL
);
734 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
736 tgsi_free_tokens(dst
->shaders
[i
]->state
.shader
.tokens
);
738 for (j
= 0; j
< PIPE_MAX_CONSTANT_BUFFERS
; j
++)
739 pipe_resource_reference(&dst
->constant_buffers
[i
][j
].buffer
, NULL
);
740 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++)
741 pipe_sampler_view_reference(&dst
->sampler_views
[i
][j
], NULL
);
742 for (j
= 0; j
< PIPE_MAX_SHADER_IMAGES
; j
++)
743 pipe_resource_reference(&dst
->shader_images
[i
][j
].resource
, NULL
);
744 for (j
= 0; j
< PIPE_MAX_SHADER_BUFFERS
; j
++)
745 pipe_resource_reference(&dst
->shader_buffers
[i
][j
].buffer
, NULL
);
748 util_unreference_framebuffer_state(&dst
->framebuffer_state
);
752 dd_copy_draw_state(struct dd_draw_state
*dst
, struct dd_draw_state
*src
)
756 if (src
->render_cond
.query
) {
757 *dst
->render_cond
.query
= *src
->render_cond
.query
;
758 dst
->render_cond
.condition
= src
->render_cond
.condition
;
759 dst
->render_cond
.mode
= src
->render_cond
.mode
;
761 dst
->render_cond
.query
= NULL
;
764 for (i
= 0; i
< ARRAY_SIZE(src
->vertex_buffers
); i
++) {
765 pipe_vertex_buffer_reference(&dst
->vertex_buffers
[i
],
766 &src
->vertex_buffers
[i
]);
769 dst
->num_so_targets
= src
->num_so_targets
;
770 for (i
= 0; i
< src
->num_so_targets
; i
++)
771 pipe_so_target_reference(&dst
->so_targets
[i
], src
->so_targets
[i
]);
772 memcpy(dst
->so_offsets
, src
->so_offsets
, sizeof(src
->so_offsets
));
774 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
775 if (!src
->shaders
[i
]) {
776 dst
->shaders
[i
] = NULL
;
780 if (src
->shaders
[i
]) {
781 dst
->shaders
[i
]->state
.shader
= src
->shaders
[i
]->state
.shader
;
782 if (src
->shaders
[i
]->state
.shader
.tokens
) {
783 dst
->shaders
[i
]->state
.shader
.tokens
=
784 tgsi_dup_tokens(src
->shaders
[i
]->state
.shader
.tokens
);
786 dst
->shaders
[i
]->state
.shader
.ir
.nir
= NULL
;
789 dst
->shaders
[i
] = NULL
;
792 for (j
= 0; j
< PIPE_MAX_CONSTANT_BUFFERS
; j
++) {
793 pipe_resource_reference(&dst
->constant_buffers
[i
][j
].buffer
,
794 src
->constant_buffers
[i
][j
].buffer
);
795 memcpy(&dst
->constant_buffers
[i
][j
], &src
->constant_buffers
[i
][j
],
796 sizeof(src
->constant_buffers
[i
][j
]));
799 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++) {
800 pipe_sampler_view_reference(&dst
->sampler_views
[i
][j
],
801 src
->sampler_views
[i
][j
]);
802 if (src
->sampler_states
[i
][j
])
803 dst
->sampler_states
[i
][j
]->state
.sampler
=
804 src
->sampler_states
[i
][j
]->state
.sampler
;
806 dst
->sampler_states
[i
][j
] = NULL
;
809 for (j
= 0; j
< PIPE_MAX_SHADER_IMAGES
; j
++) {
810 pipe_resource_reference(&dst
->shader_images
[i
][j
].resource
,
811 src
->shader_images
[i
][j
].resource
);
812 memcpy(&dst
->shader_images
[i
][j
], &src
->shader_images
[i
][j
],
813 sizeof(src
->shader_images
[i
][j
]));
816 for (j
= 0; j
< PIPE_MAX_SHADER_BUFFERS
; j
++) {
817 pipe_resource_reference(&dst
->shader_buffers
[i
][j
].buffer
,
818 src
->shader_buffers
[i
][j
].buffer
);
819 memcpy(&dst
->shader_buffers
[i
][j
], &src
->shader_buffers
[i
][j
],
820 sizeof(src
->shader_buffers
[i
][j
]));
825 dst
->velems
->state
.velems
= src
->velems
->state
.velems
;
830 dst
->rs
->state
.rs
= src
->rs
->state
.rs
;
835 dst
->dsa
->state
.dsa
= src
->dsa
->state
.dsa
;
840 dst
->blend
->state
.blend
= src
->blend
->state
.blend
;
844 dst
->blend_color
= src
->blend_color
;
845 dst
->stencil_ref
= src
->stencil_ref
;
846 dst
->sample_mask
= src
->sample_mask
;
847 dst
->min_samples
= src
->min_samples
;
848 dst
->clip_state
= src
->clip_state
;
849 util_copy_framebuffer_state(&dst
->framebuffer_state
, &src
->framebuffer_state
);
850 memcpy(dst
->scissors
, src
->scissors
, sizeof(src
->scissors
));
851 memcpy(dst
->viewports
, src
->viewports
, sizeof(src
->viewports
));
852 memcpy(dst
->tess_default_levels
, src
->tess_default_levels
,
853 sizeof(src
->tess_default_levels
));
854 dst
->apitrace_call_number
= src
->apitrace_call_number
;
858 dd_free_record(struct pipe_screen
*screen
, struct dd_draw_record
*record
)
860 u_log_page_destroy(record
->log_page
);
861 dd_unreference_copy_of_call(&record
->call
);
862 dd_unreference_copy_of_draw_state(&record
->draw_state
);
863 screen
->fence_reference(screen
, &record
->prev_bottom_of_pipe
, NULL
);
864 screen
->fence_reference(screen
, &record
->top_of_pipe
, NULL
);
865 screen
->fence_reference(screen
, &record
->bottom_of_pipe
, NULL
);
866 util_queue_fence_destroy(&record
->driver_finished
);
871 dd_write_record(FILE *f
, struct dd_draw_record
*record
)
873 PRINT_NAMED(ptr
, "pipe", record
->dctx
->pipe
);
874 PRINT_NAMED(ns
, "time before (API call)", record
->time_before
);
875 PRINT_NAMED(ns
, "time after (driver done)", record
->time_after
);
878 dd_dump_call(f
, &record
->draw_state
.base
, &record
->call
);
880 if (record
->log_page
) {
881 fprintf(f
,"\n\n**************************************************"
882 "***************************\n");
883 fprintf(f
, "Context Log:\n\n");
884 u_log_page_print(record
->log_page
, f
);
889 dd_maybe_dump_record(struct dd_screen
*dscreen
, struct dd_draw_record
*record
)
891 if (dscreen
->dump_mode
== DD_DUMP_ONLY_HANGS
||
892 (dscreen
->dump_mode
== DD_DUMP_APITRACE_CALL
&&
893 dscreen
->apitrace_dump_call
!= record
->draw_state
.base
.apitrace_call_number
))
897 dd_get_debug_filename_and_mkdir(name
, sizeof(name
), dscreen
->verbose
);
898 FILE *f
= fopen(name
, "w");
900 fprintf(stderr
, "dd: failed to open %s\n", name
);
904 dd_write_header(f
, dscreen
->screen
, record
->draw_state
.base
.apitrace_call_number
);
905 dd_write_record(f
, record
);
911 dd_fence_state(struct pipe_screen
*screen
, struct pipe_fence_handle
*fence
,
917 bool ok
= screen
->fence_finish(screen
, NULL
, fence
, 0);
919 if (not_reached
&& !ok
)
922 return ok
? "YES" : "NO ";
926 dd_report_hang(struct dd_context
*dctx
)
928 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
929 struct pipe_screen
*screen
= dscreen
->screen
;
930 bool encountered_hang
= false;
931 bool stop_output
= false;
932 unsigned num_later
= 0;
934 fprintf(stderr
, "GPU hang detected, collecting information...\n\n");
936 fprintf(stderr
, "Draw # driver prev BOP TOP BOP dump file\n"
937 "-------------------------------------------------------------\n");
939 list_for_each_entry(struct dd_draw_record
, record
, &dctx
->records
, list
) {
940 if (!encountered_hang
&&
941 screen
->fence_finish(screen
, NULL
, record
->bottom_of_pipe
, 0)) {
942 dd_maybe_dump_record(dscreen
, record
);
947 dd_maybe_dump_record(dscreen
, record
);
952 bool driver
= util_queue_fence_is_signalled(&record
->driver_finished
);
953 bool top_not_reached
= false;
954 const char *prev_bop
= dd_fence_state(screen
, record
->prev_bottom_of_pipe
, NULL
);
955 const char *top
= dd_fence_state(screen
, record
->top_of_pipe
, &top_not_reached
);
956 const char *bop
= dd_fence_state(screen
, record
->bottom_of_pipe
, NULL
);
958 fprintf(stderr
, "%-9u %s %s %s %s ",
959 record
->draw_call
, driver
? "YES" : "NO ", prev_bop
, top
, bop
);
962 dd_get_debug_filename_and_mkdir(name
, sizeof(name
), false);
964 FILE *f
= fopen(name
, "w");
966 fprintf(stderr
, "fopen failed\n");
968 fprintf(stderr
, "%s\n", name
);
970 dd_write_header(f
, dscreen
->screen
, record
->draw_state
.base
.apitrace_call_number
);
971 dd_write_record(f
, record
);
973 if (!encountered_hang
) {
974 dd_dump_driver_state(dctx
, f
, PIPE_DUMP_DEVICE_STATUS_REGISTERS
);
983 encountered_hang
= true;
986 if (num_later
|| dctx
->record_pending
) {
987 fprintf(stderr
, "... and %u%s additional draws.\n", num_later
,
988 dctx
->record_pending
? "+1 (pending)" : "");
991 fprintf(stderr
, "\nDone.\n");
996 dd_thread_main(void *input
)
998 struct dd_context
*dctx
= (struct dd_context
*)input
;
999 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1000 struct pipe_screen
*screen
= dscreen
->screen
;
1002 mtx_lock(&dctx
->mutex
);
1005 struct list_head records
;
1006 struct pipe_fence_handle
*fence
;
1007 struct pipe_fence_handle
*fence2
= NULL
;
1009 list_replace(&dctx
->records
, &records
);
1010 list_inithead(&dctx
->records
);
1011 dctx
->num_records
= 0;
1013 if (dctx
->api_stalled
)
1014 cnd_signal(&dctx
->cond
);
1016 if (!list_empty(&records
)) {
1017 /* Wait for the youngest draw. This means hangs can take a bit longer
1018 * to detect, but it's more efficient this way. */
1019 struct dd_draw_record
*youngest
=
1020 LIST_ENTRY(struct dd_draw_record
, records
.prev
, list
);
1021 fence
= youngest
->bottom_of_pipe
;
1022 } else if (dctx
->record_pending
) {
1023 /* Wait for pending fences, in case the driver ends up hanging internally. */
1024 fence
= dctx
->record_pending
->prev_bottom_of_pipe
;
1025 fence2
= dctx
->record_pending
->top_of_pipe
;
1026 } else if (dctx
->kill_thread
) {
1029 cnd_wait(&dctx
->cond
, &dctx
->mutex
);
1032 mtx_unlock(&dctx
->mutex
);
1034 /* Fences can be NULL legitimately when timeout detection is disabled. */
1036 !screen
->fence_finish(screen
, NULL
, fence
,
1037 (uint64_t)dscreen
->timeout_ms
* 1000*1000)) ||
1039 !screen
->fence_finish(screen
, NULL
, fence2
,
1040 (uint64_t)dscreen
->timeout_ms
* 1000*1000))) {
1041 mtx_lock(&dctx
->mutex
);
1042 list_splice(&records
, &dctx
->records
);
1043 dd_report_hang(dctx
);
1044 /* we won't actually get here */
1045 mtx_unlock(&dctx
->mutex
);
1048 list_for_each_entry_safe(struct dd_draw_record
, record
, &records
, list
) {
1049 dd_maybe_dump_record(dscreen
, record
);
1050 list_del(&record
->list
);
1051 dd_free_record(screen
, record
);
1054 mtx_lock(&dctx
->mutex
);
1056 mtx_unlock(&dctx
->mutex
);
1060 static struct dd_draw_record
*
1061 dd_create_record(struct dd_context
*dctx
)
1063 struct dd_draw_record
*record
;
1065 record
= MALLOC_STRUCT(dd_draw_record
);
1069 record
->dctx
= dctx
;
1070 record
->draw_call
= dctx
->num_draw_calls
;
1072 record
->prev_bottom_of_pipe
= NULL
;
1073 record
->top_of_pipe
= NULL
;
1074 record
->bottom_of_pipe
= NULL
;
1075 record
->log_page
= NULL
;
1076 util_queue_fence_init(&record
->driver_finished
);
1078 dd_init_copy_of_draw_state(&record
->draw_state
);
1079 dd_copy_draw_state(&record
->draw_state
.base
, &dctx
->draw_state
);
1085 dd_context_flush(struct pipe_context
*_pipe
,
1086 struct pipe_fence_handle
**fence
, unsigned flags
)
1088 struct dd_context
*dctx
= dd_context(_pipe
);
1089 struct pipe_context
*pipe
= dctx
->pipe
;
1091 pipe
->flush(pipe
, fence
, flags
);
1095 dd_before_draw(struct dd_context
*dctx
, struct dd_draw_record
*record
)
1097 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1098 struct pipe_context
*pipe
= dctx
->pipe
;
1099 struct pipe_screen
*screen
= dscreen
->screen
;
1101 record
->time_before
= os_time_get_nano();
1103 if (dscreen
->timeout_ms
> 0) {
1104 if (dscreen
->flush_always
&& dctx
->num_draw_calls
>= dscreen
->skip_count
) {
1105 pipe
->flush(pipe
, &record
->prev_bottom_of_pipe
, 0);
1106 screen
->fence_reference(screen
, &record
->top_of_pipe
, record
->prev_bottom_of_pipe
);
1108 pipe
->flush(pipe
, &record
->prev_bottom_of_pipe
,
1109 PIPE_FLUSH_DEFERRED
| PIPE_FLUSH_BOTTOM_OF_PIPE
);
1110 pipe
->flush(pipe
, &record
->top_of_pipe
,
1111 PIPE_FLUSH_DEFERRED
| PIPE_FLUSH_TOP_OF_PIPE
);
1114 mtx_lock(&dctx
->mutex
);
1115 dctx
->record_pending
= record
;
1116 if (list_empty(&dctx
->records
))
1117 cnd_signal(&dctx
->cond
);
1118 mtx_unlock(&dctx
->mutex
);
1123 dd_after_draw_async(void *data
)
1125 struct dd_draw_record
*record
= (struct dd_draw_record
*)data
;
1126 struct dd_context
*dctx
= record
->dctx
;
1127 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1129 record
->log_page
= u_log_new_page(&dctx
->log
);
1130 record
->time_after
= os_time_get_nano();
1132 if (!util_queue_fence_is_signalled(&record
->driver_finished
))
1133 util_queue_fence_signal(&record
->driver_finished
);
1135 if (dscreen
->dump_mode
== DD_DUMP_APITRACE_CALL
&&
1136 dscreen
->apitrace_dump_call
> dctx
->draw_state
.apitrace_call_number
) {
1137 dd_thread_join(dctx
);
1138 /* No need to continue. */
1144 dd_after_draw(struct dd_context
*dctx
, struct dd_draw_record
*record
)
1146 struct dd_screen
*dscreen
= dd_screen(dctx
->base
.screen
);
1147 struct pipe_context
*pipe
= dctx
->pipe
;
1149 if (dscreen
->timeout_ms
> 0) {
1150 unsigned flush_flags
;
1151 if (dscreen
->flush_always
&& dctx
->num_draw_calls
>= dscreen
->skip_count
)
1154 flush_flags
= PIPE_FLUSH_DEFERRED
| PIPE_FLUSH_BOTTOM_OF_PIPE
;
1155 pipe
->flush(pipe
, &record
->bottom_of_pipe
, flush_flags
);
1157 assert(record
== dctx
->record_pending
);
1160 if (pipe
->callback
) {
1161 util_queue_fence_reset(&record
->driver_finished
);
1162 pipe
->callback(pipe
, dd_after_draw_async
, record
, true);
1164 dd_after_draw_async(record
);
1167 mtx_lock(&dctx
->mutex
);
1168 if (unlikely(dctx
->num_records
> 10000)) {
1169 dctx
->api_stalled
= true;
1170 /* Since this is only a heuristic to prevent the API thread from getting
1171 * too far ahead, we don't need a loop here. */
1172 cnd_wait(&dctx
->cond
, &dctx
->mutex
);
1173 dctx
->api_stalled
= false;
1176 if (list_empty(&dctx
->records
))
1177 cnd_signal(&dctx
->cond
);
1179 list_addtail(&record
->list
, &dctx
->records
);
1180 dctx
->record_pending
= NULL
;
1181 dctx
->num_records
++;
1182 mtx_unlock(&dctx
->mutex
);
1184 ++dctx
->num_draw_calls
;
1185 if (dscreen
->skip_count
&& dctx
->num_draw_calls
% 10000 == 0)
1186 fprintf(stderr
, "Gallium debugger reached %u draw calls.\n",
1187 dctx
->num_draw_calls
);
1191 dd_context_draw_vbo(struct pipe_context
*_pipe
,
1192 const struct pipe_draw_info
*info
)
1194 struct dd_context
*dctx
= dd_context(_pipe
);
1195 struct pipe_context
*pipe
= dctx
->pipe
;
1196 struct dd_draw_record
*record
= dd_create_record(dctx
);
1198 record
->call
.type
= CALL_DRAW_VBO
;
1199 record
->call
.info
.draw_vbo
.draw
= *info
;
1200 record
->call
.info
.draw_vbo
.draw
.count_from_stream_output
= NULL
;
1201 pipe_so_target_reference(&record
->call
.info
.draw_vbo
.draw
.count_from_stream_output
,
1202 info
->count_from_stream_output
);
1203 if (info
->index_size
&& !info
->has_user_indices
) {
1204 record
->call
.info
.draw_vbo
.draw
.index
.resource
= NULL
;
1205 pipe_resource_reference(&record
->call
.info
.draw_vbo
.draw
.index
.resource
,
1206 info
->index
.resource
);
1209 if (info
->indirect
) {
1210 record
->call
.info
.draw_vbo
.indirect
= *info
->indirect
;
1211 record
->call
.info
.draw_vbo
.draw
.indirect
= &record
->call
.info
.draw_vbo
.indirect
;
1213 record
->call
.info
.draw_vbo
.indirect
.buffer
= NULL
;
1214 pipe_resource_reference(&record
->call
.info
.draw_vbo
.indirect
.buffer
,
1215 info
->indirect
->buffer
);
1216 record
->call
.info
.draw_vbo
.indirect
.indirect_draw_count
= NULL
;
1217 pipe_resource_reference(&record
->call
.info
.draw_vbo
.indirect
.indirect_draw_count
,
1218 info
->indirect
->indirect_draw_count
);
1220 memset(&record
->call
.info
.draw_vbo
.indirect
, 0, sizeof(*info
->indirect
));
1223 dd_before_draw(dctx
, record
);
1224 pipe
->draw_vbo(pipe
, info
);
1225 dd_after_draw(dctx
, record
);
1229 dd_context_launch_grid(struct pipe_context
*_pipe
,
1230 const struct pipe_grid_info
*info
)
1232 struct dd_context
*dctx
= dd_context(_pipe
);
1233 struct pipe_context
*pipe
= dctx
->pipe
;
1234 struct dd_draw_record
*record
= dd_create_record(dctx
);
1236 record
->call
.type
= CALL_LAUNCH_GRID
;
1237 record
->call
.info
.launch_grid
= *info
;
1238 record
->call
.info
.launch_grid
.indirect
= NULL
;
1239 pipe_resource_reference(&record
->call
.info
.launch_grid
.indirect
, info
->indirect
);
1241 dd_before_draw(dctx
, record
);
1242 pipe
->launch_grid(pipe
, info
);
1243 dd_after_draw(dctx
, record
);
1247 dd_context_resource_copy_region(struct pipe_context
*_pipe
,
1248 struct pipe_resource
*dst
, unsigned dst_level
,
1249 unsigned dstx
, unsigned dsty
, unsigned dstz
,
1250 struct pipe_resource
*src
, unsigned src_level
,
1251 const struct pipe_box
*src_box
)
1253 struct dd_context
*dctx
= dd_context(_pipe
);
1254 struct pipe_context
*pipe
= dctx
->pipe
;
1255 struct dd_draw_record
*record
= dd_create_record(dctx
);
1257 record
->call
.type
= CALL_RESOURCE_COPY_REGION
;
1258 record
->call
.info
.resource_copy_region
.dst
= NULL
;
1259 pipe_resource_reference(&record
->call
.info
.resource_copy_region
.dst
, dst
);
1260 record
->call
.info
.resource_copy_region
.dst_level
= dst_level
;
1261 record
->call
.info
.resource_copy_region
.dstx
= dstx
;
1262 record
->call
.info
.resource_copy_region
.dsty
= dsty
;
1263 record
->call
.info
.resource_copy_region
.dstz
= dstz
;
1264 record
->call
.info
.resource_copy_region
.src
= NULL
;
1265 pipe_resource_reference(&record
->call
.info
.resource_copy_region
.src
, src
);
1266 record
->call
.info
.resource_copy_region
.src_level
= src_level
;
1267 record
->call
.info
.resource_copy_region
.src_box
= *src_box
;
1269 dd_before_draw(dctx
, record
);
1270 pipe
->resource_copy_region(pipe
,
1271 dst
, dst_level
, dstx
, dsty
, dstz
,
1272 src
, src_level
, src_box
);
1273 dd_after_draw(dctx
, record
);
1277 dd_context_blit(struct pipe_context
*_pipe
, const struct pipe_blit_info
*info
)
1279 struct dd_context
*dctx
= dd_context(_pipe
);
1280 struct pipe_context
*pipe
= dctx
->pipe
;
1281 struct dd_draw_record
*record
= dd_create_record(dctx
);
1283 record
->call
.type
= CALL_BLIT
;
1284 record
->call
.info
.blit
= *info
;
1285 record
->call
.info
.blit
.dst
.resource
= NULL
;
1286 pipe_resource_reference(&record
->call
.info
.blit
.dst
.resource
, info
->dst
.resource
);
1287 record
->call
.info
.blit
.src
.resource
= NULL
;
1288 pipe_resource_reference(&record
->call
.info
.blit
.src
.resource
, info
->src
.resource
);
1290 dd_before_draw(dctx
, record
);
1291 pipe
->blit(pipe
, info
);
1292 dd_after_draw(dctx
, record
);
1296 dd_context_generate_mipmap(struct pipe_context
*_pipe
,
1297 struct pipe_resource
*res
,
1298 enum pipe_format format
,
1299 unsigned base_level
,
1300 unsigned last_level
,
1301 unsigned first_layer
,
1302 unsigned last_layer
)
1304 struct dd_context
*dctx
= dd_context(_pipe
);
1305 struct pipe_context
*pipe
= dctx
->pipe
;
1306 struct dd_draw_record
*record
= dd_create_record(dctx
);
1309 record
->call
.type
= CALL_GENERATE_MIPMAP
;
1310 record
->call
.info
.generate_mipmap
.res
= NULL
;
1311 pipe_resource_reference(&record
->call
.info
.generate_mipmap
.res
, res
);
1312 record
->call
.info
.generate_mipmap
.format
= format
;
1313 record
->call
.info
.generate_mipmap
.base_level
= base_level
;
1314 record
->call
.info
.generate_mipmap
.last_level
= last_level
;
1315 record
->call
.info
.generate_mipmap
.first_layer
= first_layer
;
1316 record
->call
.info
.generate_mipmap
.last_layer
= last_layer
;
1318 dd_before_draw(dctx
, record
);
1319 result
= pipe
->generate_mipmap(pipe
, res
, format
, base_level
, last_level
,
1320 first_layer
, last_layer
);
1321 dd_after_draw(dctx
, record
);
1326 dd_context_get_query_result_resource(struct pipe_context
*_pipe
,
1327 struct pipe_query
*query
,
1329 enum pipe_query_value_type result_type
,
1331 struct pipe_resource
*resource
,
1334 struct dd_context
*dctx
= dd_context(_pipe
);
1335 struct dd_query
*dquery
= dd_query(query
);
1336 struct pipe_context
*pipe
= dctx
->pipe
;
1337 struct dd_draw_record
*record
= dd_create_record(dctx
);
1339 record
->call
.type
= CALL_GET_QUERY_RESULT_RESOURCE
;
1340 record
->call
.info
.get_query_result_resource
.query
= query
;
1341 record
->call
.info
.get_query_result_resource
.wait
= wait
;
1342 record
->call
.info
.get_query_result_resource
.result_type
= result_type
;
1343 record
->call
.info
.get_query_result_resource
.index
= index
;
1344 record
->call
.info
.get_query_result_resource
.resource
= NULL
;
1345 pipe_resource_reference(&record
->call
.info
.get_query_result_resource
.resource
,
1347 record
->call
.info
.get_query_result_resource
.offset
= offset
;
1349 /* The query may be deleted by the time we need to print it. */
1350 record
->call
.info
.get_query_result_resource
.query_type
= dquery
->type
;
1352 dd_before_draw(dctx
, record
);
1353 pipe
->get_query_result_resource(pipe
, dquery
->query
, wait
,
1354 result_type
, index
, resource
, offset
);
1355 dd_after_draw(dctx
, record
);
1359 dd_context_flush_resource(struct pipe_context
*_pipe
,
1360 struct pipe_resource
*resource
)
1362 struct dd_context
*dctx
= dd_context(_pipe
);
1363 struct pipe_context
*pipe
= dctx
->pipe
;
1364 struct dd_draw_record
*record
= dd_create_record(dctx
);
1366 record
->call
.type
= CALL_FLUSH_RESOURCE
;
1367 record
->call
.info
.flush_resource
= NULL
;
1368 pipe_resource_reference(&record
->call
.info
.flush_resource
, resource
);
1370 dd_before_draw(dctx
, record
);
1371 pipe
->flush_resource(pipe
, resource
);
1372 dd_after_draw(dctx
, record
);
1376 dd_context_clear(struct pipe_context
*_pipe
, unsigned buffers
,
1377 const union pipe_color_union
*color
, double depth
,
1380 struct dd_context
*dctx
= dd_context(_pipe
);
1381 struct pipe_context
*pipe
= dctx
->pipe
;
1382 struct dd_draw_record
*record
= dd_create_record(dctx
);
1384 record
->call
.type
= CALL_CLEAR
;
1385 record
->call
.info
.clear
.buffers
= buffers
;
1386 record
->call
.info
.clear
.color
= *color
;
1387 record
->call
.info
.clear
.depth
= depth
;
1388 record
->call
.info
.clear
.stencil
= stencil
;
1390 dd_before_draw(dctx
, record
);
1391 pipe
->clear(pipe
, buffers
, color
, depth
, stencil
);
1392 dd_after_draw(dctx
, record
);
1396 dd_context_clear_render_target(struct pipe_context
*_pipe
,
1397 struct pipe_surface
*dst
,
1398 const union pipe_color_union
*color
,
1399 unsigned dstx
, unsigned dsty
,
1400 unsigned width
, unsigned height
,
1401 bool render_condition_enabled
)
1403 struct dd_context
*dctx
= dd_context(_pipe
);
1404 struct pipe_context
*pipe
= dctx
->pipe
;
1405 struct dd_draw_record
*record
= dd_create_record(dctx
);
1407 record
->call
.type
= CALL_CLEAR_RENDER_TARGET
;
1409 dd_before_draw(dctx
, record
);
1410 pipe
->clear_render_target(pipe
, dst
, color
, dstx
, dsty
, width
, height
,
1411 render_condition_enabled
);
1412 dd_after_draw(dctx
, record
);
1416 dd_context_clear_depth_stencil(struct pipe_context
*_pipe
,
1417 struct pipe_surface
*dst
, unsigned clear_flags
,
1418 double depth
, unsigned stencil
, unsigned dstx
,
1419 unsigned dsty
, unsigned width
, unsigned height
,
1420 bool render_condition_enabled
)
1422 struct dd_context
*dctx
= dd_context(_pipe
);
1423 struct pipe_context
*pipe
= dctx
->pipe
;
1424 struct dd_draw_record
*record
= dd_create_record(dctx
);
1426 record
->call
.type
= CALL_CLEAR_DEPTH_STENCIL
;
1428 dd_before_draw(dctx
, record
);
1429 pipe
->clear_depth_stencil(pipe
, dst
, clear_flags
, depth
, stencil
,
1430 dstx
, dsty
, width
, height
,
1431 render_condition_enabled
);
1432 dd_after_draw(dctx
, record
);
1436 dd_context_clear_buffer(struct pipe_context
*_pipe
, struct pipe_resource
*res
,
1437 unsigned offset
, unsigned size
,
1438 const void *clear_value
, int clear_value_size
)
1440 struct dd_context
*dctx
= dd_context(_pipe
);
1441 struct pipe_context
*pipe
= dctx
->pipe
;
1442 struct dd_draw_record
*record
= dd_create_record(dctx
);
1444 record
->call
.type
= CALL_CLEAR_BUFFER
;
1445 record
->call
.info
.clear_buffer
.res
= NULL
;
1446 pipe_resource_reference(&record
->call
.info
.clear_buffer
.res
, res
);
1447 record
->call
.info
.clear_buffer
.offset
= offset
;
1448 record
->call
.info
.clear_buffer
.size
= size
;
1449 record
->call
.info
.clear_buffer
.clear_value
= clear_value
;
1450 record
->call
.info
.clear_buffer
.clear_value_size
= clear_value_size
;
1452 dd_before_draw(dctx
, record
);
1453 pipe
->clear_buffer(pipe
, res
, offset
, size
, clear_value
, clear_value_size
);
1454 dd_after_draw(dctx
, record
);
1458 dd_context_clear_texture(struct pipe_context
*_pipe
,
1459 struct pipe_resource
*res
,
1461 const struct pipe_box
*box
,
1464 struct dd_context
*dctx
= dd_context(_pipe
);
1465 struct pipe_context
*pipe
= dctx
->pipe
;
1466 struct dd_draw_record
*record
= dd_create_record(dctx
);
1468 record
->call
.type
= CALL_CLEAR_TEXTURE
;
1470 dd_before_draw(dctx
, record
);
1471 pipe
->clear_texture(pipe
, res
, level
, box
, data
);
1472 dd_after_draw(dctx
, record
);
1475 /********************************************************************
1480 dd_context_transfer_map(struct pipe_context
*_pipe
,
1481 struct pipe_resource
*resource
, unsigned level
,
1482 unsigned usage
, const struct pipe_box
*box
,
1483 struct pipe_transfer
**transfer
)
1485 struct dd_context
*dctx
= dd_context(_pipe
);
1486 struct pipe_context
*pipe
= dctx
->pipe
;
1487 struct dd_draw_record
*record
=
1488 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1491 record
->call
.type
= CALL_TRANSFER_MAP
;
1493 dd_before_draw(dctx
, record
);
1495 void *ptr
= pipe
->transfer_map(pipe
, resource
, level
, usage
, box
, transfer
);
1497 record
->call
.info
.transfer_map
.transfer_ptr
= *transfer
;
1498 record
->call
.info
.transfer_map
.ptr
= ptr
;
1500 record
->call
.info
.transfer_map
.transfer
= **transfer
;
1501 record
->call
.info
.transfer_map
.transfer
.resource
= NULL
;
1502 pipe_resource_reference(&record
->call
.info
.transfer_map
.transfer
.resource
,
1503 (*transfer
)->resource
);
1505 memset(&record
->call
.info
.transfer_map
.transfer
, 0, sizeof(struct pipe_transfer
));
1508 dd_after_draw(dctx
, record
);
1514 dd_context_transfer_flush_region(struct pipe_context
*_pipe
,
1515 struct pipe_transfer
*transfer
,
1516 const struct pipe_box
*box
)
1518 struct dd_context
*dctx
= dd_context(_pipe
);
1519 struct pipe_context
*pipe
= dctx
->pipe
;
1520 struct dd_draw_record
*record
=
1521 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1524 record
->call
.type
= CALL_TRANSFER_FLUSH_REGION
;
1525 record
->call
.info
.transfer_flush_region
.transfer_ptr
= transfer
;
1526 record
->call
.info
.transfer_flush_region
.box
= *box
;
1527 record
->call
.info
.transfer_flush_region
.transfer
= *transfer
;
1528 record
->call
.info
.transfer_flush_region
.transfer
.resource
= NULL
;
1529 pipe_resource_reference(
1530 &record
->call
.info
.transfer_flush_region
.transfer
.resource
,
1531 transfer
->resource
);
1533 dd_before_draw(dctx
, record
);
1535 pipe
->transfer_flush_region(pipe
, transfer
, box
);
1537 dd_after_draw(dctx
, record
);
1541 dd_context_transfer_unmap(struct pipe_context
*_pipe
,
1542 struct pipe_transfer
*transfer
)
1544 struct dd_context
*dctx
= dd_context(_pipe
);
1545 struct pipe_context
*pipe
= dctx
->pipe
;
1546 struct dd_draw_record
*record
=
1547 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1550 record
->call
.type
= CALL_TRANSFER_UNMAP
;
1551 record
->call
.info
.transfer_unmap
.transfer_ptr
= transfer
;
1552 record
->call
.info
.transfer_unmap
.transfer
= *transfer
;
1553 record
->call
.info
.transfer_unmap
.transfer
.resource
= NULL
;
1554 pipe_resource_reference(
1555 &record
->call
.info
.transfer_unmap
.transfer
.resource
,
1556 transfer
->resource
);
1558 dd_before_draw(dctx
, record
);
1560 pipe
->transfer_unmap(pipe
, transfer
);
1562 dd_after_draw(dctx
, record
);
1566 dd_context_buffer_subdata(struct pipe_context
*_pipe
,
1567 struct pipe_resource
*resource
,
1568 unsigned usage
, unsigned offset
,
1569 unsigned size
, const void *data
)
1571 struct dd_context
*dctx
= dd_context(_pipe
);
1572 struct pipe_context
*pipe
= dctx
->pipe
;
1573 struct dd_draw_record
*record
=
1574 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1577 record
->call
.type
= CALL_BUFFER_SUBDATA
;
1578 record
->call
.info
.buffer_subdata
.resource
= NULL
;
1579 pipe_resource_reference(&record
->call
.info
.buffer_subdata
.resource
, resource
);
1580 record
->call
.info
.buffer_subdata
.usage
= usage
;
1581 record
->call
.info
.buffer_subdata
.offset
= offset
;
1582 record
->call
.info
.buffer_subdata
.size
= size
;
1583 record
->call
.info
.buffer_subdata
.data
= data
;
1585 dd_before_draw(dctx
, record
);
1587 pipe
->buffer_subdata(pipe
, resource
, usage
, offset
, size
, data
);
1589 dd_after_draw(dctx
, record
);
1593 dd_context_texture_subdata(struct pipe_context
*_pipe
,
1594 struct pipe_resource
*resource
,
1595 unsigned level
, unsigned usage
,
1596 const struct pipe_box
*box
,
1597 const void *data
, unsigned stride
,
1598 unsigned layer_stride
)
1600 struct dd_context
*dctx
= dd_context(_pipe
);
1601 struct pipe_context
*pipe
= dctx
->pipe
;
1602 struct dd_draw_record
*record
=
1603 dd_screen(dctx
->base
.screen
)->transfers
? dd_create_record(dctx
) : NULL
;
1606 record
->call
.type
= CALL_TEXTURE_SUBDATA
;
1607 record
->call
.info
.texture_subdata
.resource
= NULL
;
1608 pipe_resource_reference(&record
->call
.info
.texture_subdata
.resource
, resource
);
1609 record
->call
.info
.texture_subdata
.level
= level
;
1610 record
->call
.info
.texture_subdata
.usage
= usage
;
1611 record
->call
.info
.texture_subdata
.box
= *box
;
1612 record
->call
.info
.texture_subdata
.data
= data
;
1613 record
->call
.info
.texture_subdata
.stride
= stride
;
1614 record
->call
.info
.texture_subdata
.layer_stride
= layer_stride
;
1616 dd_before_draw(dctx
, record
);
1618 pipe
->texture_subdata(pipe
, resource
, level
, usage
, box
, data
,
1619 stride
, layer_stride
);
1621 dd_after_draw(dctx
, record
);
1625 dd_init_draw_functions(struct dd_context
*dctx
)
1629 CTX_INIT(launch_grid
);
1630 CTX_INIT(resource_copy_region
);
1633 CTX_INIT(clear_render_target
);
1634 CTX_INIT(clear_depth_stencil
);
1635 CTX_INIT(clear_buffer
);
1636 CTX_INIT(clear_texture
);
1637 CTX_INIT(flush_resource
);
1638 CTX_INIT(generate_mipmap
);
1639 CTX_INIT(get_query_result_resource
);
1640 CTX_INIT(transfer_map
);
1641 CTX_INIT(transfer_flush_region
);
1642 CTX_INIT(transfer_unmap
);
1643 CTX_INIT(buffer_subdata
);
1644 CTX_INIT(texture_subdata
);