gallium/ddebug: dump missing members of pipe_draw_info
[mesa.git] / src / gallium / drivers / ddebug / dd_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "dd_pipe.h"
29
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "os/os_time.h"
39 #include <inttypes.h>
40
41
42 static FILE *
43 dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
44 {
45 struct pipe_screen *screen = dscreen->screen;
46 char cmd_line[4096];
47
48 FILE *f = dd_get_debug_file(dscreen->verbose);
49 if (!f)
50 return NULL;
51
52 if (os_get_command_line(cmd_line, sizeof(cmd_line)))
53 fprintf(f, "Command: %s\n", cmd_line);
54 fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
55 fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
56 fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
57
58 if (apitrace_call_number)
59 fprintf(f, "Last apitrace call: %u\n\n",
60 apitrace_call_number);
61 return f;
62 }
63
/* Append the last 60 lines of the kernel log (dmesg) to the stream.
 * Best effort: silently does nothing if the pipe can't be opened.
 */
static void
dd_dump_dmesg(FILE *f)
{
   FILE *proc = popen("dmesg | tail -n60", "r");
   char buf[2000];

   if (!proc)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(buf, sizeof(buf), proc))
      fputs(buf, f);

   pclose(proc);
}
79
/* Close a stream obtained from dd_get_file_stream(). */
static void
dd_close_file_stream(FILE *f)
{
   fclose(f);
}
85
86 static unsigned
87 dd_num_active_viewports(struct dd_draw_state *dstate)
88 {
89 struct tgsi_shader_info info;
90 const struct tgsi_token *tokens;
91
92 if (dstate->shaders[PIPE_SHADER_GEOMETRY])
93 tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
94 else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
95 tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
96 else if (dstate->shaders[PIPE_SHADER_VERTEX])
97 tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
98 else
99 return 1;
100
101 tgsi_scan_shader(tokens, &info);
102 return info.writes_viewport_index ? PIPE_MAX_VIEWPORTS : 1;
103 }
104
/* ANSI terminal escapes used to colorize sections of the state dump. */
#define COLOR_RESET "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE "\033[1;33m"

/* Dump a whole state object via util_dump_<name>(f, var).
 * All DUMP* macros expect a FILE *f in the enclosing scope.
 */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump the i-th element of an array of state objects. */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump one member of a state structure, passed by value. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Dump one member of a state structure, passed by address. */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)
132
/* Print "name = value" (decimal) with the state highlight color. */
static void
print_named_value(FILE *f, const char *name, int value)
{
   fprintf(f, COLOR_STATE "%s" COLOR_RESET " = %i\n", name, value);
}
138
/* Print "name = 0x........" (8-digit hex) with the state highlight color. */
static void
print_named_xvalue(FILE *f, const char *name, int value)
{
   fprintf(f, COLOR_STATE "%s" COLOR_RESET " = 0x%08x\n", name, value);
}
144
/* Print an unsigned integer in decimal. */
static void
util_dump_uint(FILE *f, unsigned i)
{
   char text[32];

   snprintf(text, sizeof(text), "%u", i);
   fputs(text, f);
}
150
/* Print an unsigned integer as "0x..." lowercase hex. */
static void
util_dump_hex(FILE *f, unsigned i)
{
   char text[32];

   snprintf(text, sizeof(text), "0x%x", i);
   fputs(text, f);
}
156
/* Print a double with the default "%f" formatting (6 decimal places). */
static void
util_dump_double(FILE *f, double d)
{
   fprintf(f, "%f", d);
}
162
/* Print the symbolic name of a pipe_format. */
static void
util_dump_format(FILE *f, enum pipe_format format)
{
   fprintf(f, "%s", util_format_name(format));
}
168
169 static void
170 util_dump_color_union(FILE *f, const union pipe_color_union *color)
171 {
172 fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
173 color->f[0], color->f[1], color->f[2], color->f[3],
174 color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
175 }
176
177 static void
178 util_dump_query(FILE *f, struct dd_query *query)
179 {
180 if (query->type >= PIPE_QUERY_DRIVER_SPECIFIC)
181 fprintf(f, "PIPE_QUERY_DRIVER_SPECIFIC + %i",
182 query->type - PIPE_QUERY_DRIVER_SPECIFIC);
183 else
184 fprintf(f, "%s", util_dump_query_type(query->type, false));
185 }
186
187 static void
188 dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
189 {
190 if (dstate->render_cond.query) {
191 fprintf(f, "render condition:\n");
192 DUMP_M(query, &dstate->render_cond, query);
193 DUMP_M(uint, &dstate->render_cond, condition);
194 DUMP_M(uint, &dstate->render_cond, mode);
195 fprintf(f, "\n");
196 }
197 }
198
/* Dump everything relevant to a draw_vbo call: the draw parameters,
 * index/indirect buffers, render condition, vertex inputs, stream output,
 * all bound shader stages with their resources, and the fixed-function
 * and framebuffer state.  Output order is significant for log readers.
 */
static void
dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
{
   int sh, i;
   const char *shader_str[PIPE_SHADER_TYPES];

   shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
   shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
   shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
   shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
   shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
   shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";

   /* Draw parameters and the optional index/indirect buffers. */
   DUMP(draw_info, info);
   if (info->indexed) {
      DUMP(index_buffer, &dstate->index_buffer);
      if (dstate->index_buffer.buffer)
         DUMP_M(resource, &dstate->index_buffer, buffer);
   }
   if (info->count_from_stream_output)
      DUMP_M(stream_output_target, info,
             count_from_stream_output);
   if (info->indirect)
      DUMP_M(resource, info, indirect);
   if (info->indirect_params)
      DUMP_M(resource, info, indirect_params);
   fprintf(f, "\n");

   /* TODO: dump active queries */

   dd_dump_render_condition(dstate, f);

   /* Vertex inputs: bound vertex buffers and the vertex-element layout. */
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      if (dstate->vertex_buffers[i].buffer ||
          dstate->vertex_buffers[i].user_buffer) {
         DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
         if (dstate->vertex_buffers[i].buffer)
            DUMP_M(resource, &dstate->vertex_buffers[i], buffer);
      }

   if (dstate->velems) {
      print_named_value(f, "num vertex elements",
                        dstate->velems->state.velems.count);
      for (i = 0; i < dstate->velems->state.velems.count; i++) {
         fprintf(f, " ");
         DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
      }
   }

   print_named_value(f, "num stream output targets", dstate->num_so_targets);
   for (i = 0; i < dstate->num_so_targets; i++)
      if (dstate->so_targets[i]) {
         DUMP_I(stream_output_target, dstate->so_targets[i], i);
         DUMP_M(resource, dstate->so_targets[i], buffer);
         fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
      }

   fprintf(f, "\n");
   /* Per-stage dump.  Compute is irrelevant to draw calls and skipped. */
   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (sh == PIPE_SHADER_COMPUTE)
         continue;

      /* Default tess levels matter when TES is bound without a TCS. */
      if (sh == PIPE_SHADER_TESS_CTRL &&
          !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
          dstate->shaders[PIPE_SHADER_TESS_EVAL])
         fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
                 "default_inner_level = {%f, %f}}\n",
                 dstate->tess_default_levels[0],
                 dstate->tess_default_levels[1],
                 dstate->tess_default_levels[2],
                 dstate->tess_default_levels[3],
                 dstate->tess_default_levels[4],
                 dstate->tess_default_levels[5]);

      /* Rasterizer/viewport state is printed just before the FS. */
      if (sh == PIPE_SHADER_FRAGMENT)
         if (dstate->rs) {
            unsigned num_viewports = dd_num_active_viewports(dstate);

            if (dstate->rs->state.rs.clip_plane_enable)
               DUMP(clip_state, &dstate->clip_state);

            for (i = 0; i < num_viewports; i++)
               DUMP_I(viewport_state, &dstate->viewports[i], i);

            if (dstate->rs->state.rs.scissor)
               for (i = 0; i < num_viewports; i++)
                  DUMP_I(scissor_state, &dstate->scissors[i], i);

            DUMP(rasterizer_state, &dstate->rs->state.rs);

            if (dstate->rs->state.rs.poly_stipple_enable)
               DUMP(poly_stipple, &dstate->polygon_stipple);
            fprintf(f, "\n");
         }

      if (!dstate->shaders[sh])
         continue;

      fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
      DUMP(shader_state, &dstate->shaders[sh]->state.shader);

      /* Resources bound to this stage. */
      for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
         if (dstate->constant_buffers[sh][i].buffer ||
             dstate->constant_buffers[sh][i].user_buffer) {
            DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
            if (dstate->constant_buffers[sh][i].buffer)
               DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
         }

      for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
         if (dstate->sampler_states[sh][i])
            DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);

      for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
         if (dstate->sampler_views[sh][i]) {
            DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
            DUMP_M(resource, dstate->sampler_views[sh][i], texture);
         }

      for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
         if (dstate->shader_images[sh][i].resource) {
            DUMP_I(image_view, &dstate->shader_images[sh][i], i);
            if (dstate->shader_images[sh][i].resource)
               DUMP_M(resource, &dstate->shader_images[sh][i], resource);
         }

      for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
         if (dstate->shader_buffers[sh][i].buffer) {
            DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
            if (dstate->shader_buffers[sh][i].buffer)
               DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
         }

      fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
   }

   /* Output-merger and framebuffer state. */
   if (dstate->dsa)
      DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
   DUMP(stencil_ref, &dstate->stencil_ref);

   if (dstate->blend)
      DUMP(blend_state, &dstate->blend->state.blend);
   DUMP(blend_color, &dstate->blend_color);

   print_named_value(f, "min_samples", dstate->min_samples);
   print_named_xvalue(f, "sample_mask", dstate->sample_mask);
   fprintf(f, "\n");

   DUMP(framebuffer_state, &dstate->framebuffer_state);
   for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
      if (dstate->framebuffer_state.cbufs[i]) {
         fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
         DUMP(surface, dstate->framebuffer_state.cbufs[i]);
         fprintf(f, " ");
         DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
      }
   if (dstate->framebuffer_state.zsbuf) {
      fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
      DUMP(surface, dstate->framebuffer_state.zsbuf);
      fprintf(f, " ");
      DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
   }
   fprintf(f, "\n");
}
363
/* Dump a launch_grid call.  __func__ + 8 skips this function's
 * "dd_dump_" prefix, yielding the plain call name.
 */
static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__ + 8);
   /* TODO: dump the pipe_grid_info members */
}
370
/* Dump the arguments of a resource_copy_region call.
 * __func__ + 8 skips the "dd_dump_" prefix of this function's name.
 */
static void
dd_dump_resource_copy_region(struct dd_draw_state *dstate,
                             struct call_resource_copy_region *info,
                             FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst);
   DUMP_M(uint, info, dst_level);
   DUMP_M(uint, info, dstx);
   DUMP_M(uint, info, dsty);
   DUMP_M(uint, info, dstz);
   DUMP_M(resource, info, src);
   DUMP_M(uint, info, src_level);
   DUMP_M_ADDR(box, info, src_box);
}
386
/* Dump all arguments of a blit call.
 * __func__ + 8 skips the "dd_dump_" prefix of this function's name.
 */
static void
dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* Destination subresource. */
   DUMP_M(resource, info, dst.resource);
   DUMP_M(uint, info, dst.level);
   DUMP_M_ADDR(box, info, dst.box);
   DUMP_M(format, info, dst.format);

   /* Source subresource. */
   DUMP_M(resource, info, src.resource);
   DUMP_M(uint, info, src.level);
   DUMP_M_ADDR(box, info, src.box);
   DUMP_M(format, info, src.format);

   DUMP_M(hex, info, mask);
   DUMP_M(uint, info, filter);
   DUMP_M(uint, info, scissor_enable);
   DUMP_M_ADDR(scissor_state, info, scissor);
   DUMP_M(uint, info, render_condition_enable);

   /* The blit honors the render condition, so log it as well. */
   if (info->render_condition_enable)
      dd_dump_render_condition(dstate, f);
}
410
/* Dump a generate_mipmap call.  __func__ + 8 skips this function's
 * "dd_dump_" prefix, yielding the plain call name.
 */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__ + 8);
   /* TODO: dump the generate_mipmap arguments */
}
417
/* Dump a flush_resource call: just the resource being flushed.
 * __func__ + 8 skips the "dd_dump_" prefix of this function's name.
 */
static void
dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
                       FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(resource, res);
}
425
/* Dump a clear call: buffer mask, clear color, depth and stencil values.
 * __func__ + 8 skips the "dd_dump_" prefix of this function's name.
 */
static void
dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(uint, info, buffers);
   DUMP_M_ADDR(color_union, info, color);
   DUMP_M(double, info, depth);
   DUMP_M(hex, info, stencil);
}
435
436 static void
437 dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
438 FILE *f)
439 {
440 int i;
441 const char *value = (const char*)info->clear_value;
442
443 fprintf(f, "%s:\n", __func__+8);
444 DUMP_M(resource, info, res);
445 DUMP_M(uint, info, offset);
446 DUMP_M(uint, info, size);
447 DUMP_M(uint, info, clear_value_size);
448
449 fprintf(f, " clear_value:");
450 for (i = 0; i < info->clear_value_size; i++)
451 fprintf(f, " %02x", value[i]);
452 fprintf(f, "\n");
453 }
454
/* Dump a clear_texture call.  __func__ + 8 skips this function's
 * "dd_dump_" prefix, yielding the plain call name.
 */
static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__ + 8);
   /* TODO: dump the clear_texture arguments */
}
461
/* Dump a clear_render_target call.  __func__ + 8 skips this function's
 * "dd_dump_" prefix, yielding the plain call name.
 */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__ + 8);
   /* TODO: dump the clear_render_target arguments */
}
468
/* Dump a clear_depth_stencil call.  __func__ + 8 skips this function's
 * "dd_dump_" prefix, yielding the plain call name.
 */
static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__ + 8);
   /* TODO: dump the clear_depth_stencil arguments */
}
475
476 static void
477 dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
478 {
479 if (dctx->pipe->dump_debug_state) {
480 fprintf(f,"\n\n**************************************************"
481 "***************************\n");
482 fprintf(f, "Driver-specific state:\n\n");
483 dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
484 }
485 }
486
/* Dispatch a recorded call to the matching per-type dump routine. */
static void
dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
{
   switch (call->type) {
   case CALL_DRAW_VBO:
      dd_dump_draw_vbo(state, &call->info.draw_vbo, f);
      break;
   case CALL_LAUNCH_GRID:
      dd_dump_launch_grid(state, &call->info.launch_grid, f);
      break;
   case CALL_RESOURCE_COPY_REGION:
      dd_dump_resource_copy_region(state,
                                   &call->info.resource_copy_region, f);
      break;
   case CALL_BLIT:
      dd_dump_blit(state, &call->info.blit, f);
      break;
   case CALL_FLUSH_RESOURCE:
      dd_dump_flush_resource(state, call->info.flush_resource, f);
      break;
   case CALL_CLEAR:
      dd_dump_clear(state, &call->info.clear, f);
      break;
   case CALL_CLEAR_BUFFER:
      dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
      break;
   case CALL_CLEAR_TEXTURE:
      dd_dump_clear_texture(state, f);
      break;
   case CALL_CLEAR_RENDER_TARGET:
      dd_dump_clear_render_target(state, f);
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      dd_dump_clear_depth_stencil(state, f);
      break;
   case CALL_GENERATE_MIPMAP:
      dd_dump_generate_mipmap(state, f);
      break;
   }
}
527
/* Write a complete report for one call: the call itself, the driver
 * state selected by `flags`, and (optionally) the kernel log.
 * Silently does nothing if the debug file can't be opened.
 */
static void
dd_write_report(struct dd_context *dctx, struct dd_call *call, unsigned flags,
                bool dump_dmesg)
{
   FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                dctx->draw_state.apitrace_call_number);

   if (!f)
      return;

   dd_dump_call(f, &dctx->draw_state, call);
   dd_dump_driver_state(dctx, f, flags);
   if (dump_dmesg)
      dd_dump_dmesg(f);
   dd_close_file_stream(f);
}
544
/* Terminate the process after a GPU hang has been reported.
 * sync() first so the freshly written log reaches stable storage even if
 * the machine becomes unusable, then flush stdio and exit.
 */
static void
dd_kill_process(void)
{
   sync();
   fprintf(stderr, "dd: Aborting the process...\n");
   fflush(stdout);
   fflush(stderr);
   exit(1);
}
554
/* Flush the wrapped context and wait for the resulting fence.
 *
 * \param flush_fence  if non-NULL, receives a reference to the flush fence
 * \param flush_flags  flags forwarded to pipe->flush
 * \return true if the fence did not signal within the configured timeout,
 *         i.e. the GPU is considered hung
 */
static bool
dd_flush_and_check_hang(struct dd_context *dctx,
                        struct pipe_fence_handle **flush_fence,
                        unsigned flush_flags)
{
   struct pipe_fence_handle *fence = NULL;
   struct pipe_context *pipe = dctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   uint64_t timeout_ms = dd_screen(dctx->base.screen)->timeout_ms;
   bool idle;

   assert(timeout_ms > 0);

   pipe->flush(pipe, &fence, flush_flags);
   if (flush_fence)
      screen->fence_reference(screen, flush_fence, fence);
   /* No fence means nothing to wait on; treat as not hung. */
   if (!fence)
      return false;

   /* timeout_ms * 1000000: fence_finish takes nanoseconds. */
   idle = screen->fence_finish(screen, pipe, fence, timeout_ms * 1000000);
   screen->fence_reference(screen, &fence, NULL);
   if (!idle)
      fprintf(stderr, "dd: GPU hang detected!\n");
   return !idle;
}
580
/* Flush and, if a hang is detected, dump as much state as possible and
 * kill the process so further work isn't queued on a hung GPU.
 * `cause` is a human-readable description written at the top of the report.
 */
static void
dd_flush_and_handle_hang(struct dd_context *dctx,
                         struct pipe_fence_handle **fence, unsigned flags,
                         const char *cause)
{
   if (dd_flush_and_check_hang(dctx, fence, flags)) {
      FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                   dctx->draw_state.apitrace_call_number);

      if (f) {
         fprintf(f, "dd: %s.\n", cause);
         dd_dump_driver_state(dctx, f,
                              PIPE_DUMP_DEVICE_STATUS_REGISTERS |
                              PIPE_DUMP_CURRENT_STATES |
                              PIPE_DUMP_CURRENT_SHADERS |
                              PIPE_DUMP_LAST_COMMAND_BUFFER);
         dd_dump_dmesg(f);
         dd_close_file_stream(f);
      }

      /* Terminate the process to prevent future hangs. */
      dd_kill_process();
   }
}
605
/* Drop the references that dd_copy_call() added to reference-counted
 * objects recorded in a call copy.  Passing NULL as the new value to the
 * *_reference() helpers releases the held reference.  Call types without
 * reference-counted members are listed explicitly to keep the switch
 * exhaustive.
 */
static void
dd_unreference_copy_of_call(struct dd_call *dst)
{
   switch (dst->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.count_from_stream_output, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect_params, NULL);
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
      pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
      pipe_resource_reference(&dst->info.blit.src.resource, NULL);
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource, NULL);
      break;
   case CALL_CLEAR:
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
      break;
   }
}
645
/* Record a copy of a call for later dumping.
 *
 * For each reference-counted member, a reference is taken first via the
 * *_reference() helpers (which also update dst's pointer), and then the
 * whole info struct is copied by assignment.  The assignment rewrites the
 * pointer with the same value, so the reference counts stay consistent.
 */
static void
dd_copy_call(struct dd_call *dst, struct dd_call *src)
{
   dst->type = src->type;

   switch (src->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.count_from_stream_output,
                               src->info.draw_vbo.count_from_stream_output);
      pipe_resource_reference(&dst->info.draw_vbo.indirect,
                              src->info.draw_vbo.indirect);
      pipe_resource_reference(&dst->info.draw_vbo.indirect_params,
                              src->info.draw_vbo.indirect_params);
      dst->info.draw_vbo = src->info.draw_vbo;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect,
                              src->info.launch_grid.indirect);
      dst->info.launch_grid = src->info.launch_grid;
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst,
                              src->info.resource_copy_region.dst);
      pipe_resource_reference(&dst->info.resource_copy_region.src,
                              src->info.resource_copy_region.src);
      dst->info.resource_copy_region = src->info.resource_copy_region;
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource,
                              src->info.blit.dst.resource);
      pipe_resource_reference(&dst->info.blit.src.resource,
                              src->info.blit.src.resource);
      dst->info.blit = src->info.blit;
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource,
                              src->info.flush_resource);
      break;
   case CALL_CLEAR:
      dst->info.clear = src->info.clear;
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res,
                              src->info.clear_buffer.res);
      dst->info.clear_buffer = src->info.clear_buffer;
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res,
                              src->info.generate_mipmap.res);
      dst->info.generate_mipmap = src->info.generate_mipmap;
      break;
   }
}
705
/* Prepare a draw-state copy for use: zero only the pointer-holding parts
 * and redirect the CSO/query pointers into the copy's embedded storage so
 * dd_copy_draw_state() can copy into it by value.
 */
static void
dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   unsigned i,j;

   /* Just clear pointers to gallium objects. Don't clear the whole structure,
    * because it would kill performance with its size of 130 KB.
    */
   memset(&state->base.index_buffer, 0,
          sizeof(state->base.index_buffer));
   memset(state->base.vertex_buffers, 0,
          sizeof(state->base.vertex_buffers));
   memset(state->base.so_targets, 0,
          sizeof(state->base.so_targets));
   memset(state->base.constant_buffers, 0,
          sizeof(state->base.constant_buffers));
   memset(state->base.sampler_views, 0,
          sizeof(state->base.sampler_views));
   memset(state->base.shader_images, 0,
          sizeof(state->base.shader_images));
   memset(state->base.shader_buffers, 0,
          sizeof(state->base.shader_buffers));
   memset(&state->base.framebuffer_state, 0,
          sizeof(state->base.framebuffer_state));

   memset(state->shaders, 0, sizeof(state->shaders));

   /* Point the base state at the storage embedded in the copy. */
   state->base.render_cond.query = &state->render_cond;

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      state->base.shaders[i] = &state->shaders[i];
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         state->base.sampler_states[i][j] = &state->sampler_states[i][j];
   }

   state->base.velems = &state->velems;
   state->base.rs = &state->rs;
   state->base.dsa = &state->dsa;
   state->base.blend = &state->blend;
}
746
/* Release everything a draw-state copy holds: buffer/view references,
 * duplicated shader tokens, and the framebuffer surface references.
 */
static void
dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   struct dd_draw_state *dst = &state->base;
   unsigned i,j;

   util_set_index_buffer(&dst->index_buffer, NULL);

   for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
      pipe_resource_reference(&dst->vertex_buffers[i].buffer, NULL);
   for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
      pipe_so_target_reference(&dst->so_targets[i], NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* Tokens were duplicated by dd_copy_draw_state(). */
      if (dst->shaders[i])
         tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);

      for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
         pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
      for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
         pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
      for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
         pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
   }

   util_unreference_framebuffer_state(&dst->framebuffer_state);
}
776
777 static void
778 dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
779 {
780 unsigned i,j;
781
782 if (src->render_cond.query) {
783 *dst->render_cond.query = *src->render_cond.query;
784 dst->render_cond.condition = src->render_cond.condition;
785 dst->render_cond.mode = src->render_cond.mode;
786 } else {
787 dst->render_cond.query = NULL;
788 }
789
790 util_set_index_buffer(&dst->index_buffer, &src->index_buffer);
791
792 for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
793 pipe_resource_reference(&dst->vertex_buffers[i].buffer,
794 src->vertex_buffers[i].buffer);
795 memcpy(&dst->vertex_buffers[i], &src->vertex_buffers[i],
796 sizeof(src->vertex_buffers[i]));
797 }
798
799 dst->num_so_targets = src->num_so_targets;
800 for (i = 0; i < ARRAY_SIZE(src->so_targets); i++)
801 pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
802 memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
803
804 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
805 if (!src->shaders[i]) {
806 dst->shaders[i] = NULL;
807 continue;
808 }
809
810 if (src->shaders[i]) {
811 dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
812 dst->shaders[i]->state.shader.tokens =
813 tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
814 } else {
815 dst->shaders[i] = NULL;
816 }
817
818 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
819 pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
820 src->constant_buffers[i][j].buffer);
821 memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
822 sizeof(src->constant_buffers[i][j]));
823 }
824
825 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
826 pipe_sampler_view_reference(&dst->sampler_views[i][j],
827 src->sampler_views[i][j]);
828 if (src->sampler_states[i][j])
829 dst->sampler_states[i][j]->state.sampler =
830 src->sampler_states[i][j]->state.sampler;
831 else
832 dst->sampler_states[i][j] = NULL;
833 }
834
835 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
836 pipe_resource_reference(&dst->shader_images[i][j].resource,
837 src->shader_images[i][j].resource);
838 memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
839 sizeof(src->shader_images[i][j]));
840 }
841
842 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
843 pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
844 src->shader_buffers[i][j].buffer);
845 memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
846 sizeof(src->shader_buffers[i][j]));
847 }
848 }
849
850 if (src->velems)
851 dst->velems->state.velems = src->velems->state.velems;
852 else
853 dst->velems = NULL;
854
855 if (src->rs)
856 dst->rs->state.rs = src->rs->state.rs;
857 else
858 dst->rs = NULL;
859
860 if (src->dsa)
861 dst->dsa->state.dsa = src->dsa->state.dsa;
862 else
863 dst->dsa = NULL;
864
865 if (src->blend)
866 dst->blend->state.blend = src->blend->state.blend;
867 else
868 dst->blend = NULL;
869
870 dst->blend_color = src->blend_color;
871 dst->stencil_ref = src->stencil_ref;
872 dst->sample_mask = src->sample_mask;
873 dst->min_samples = src->min_samples;
874 dst->clip_state = src->clip_state;
875 util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
876 memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
877 memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
878 memcpy(dst->tess_default_levels, src->tess_default_levels,
879 sizeof(src->tess_default_levels));
880 dst->apitrace_call_number = src->apitrace_call_number;
881 }
882
883 static void
884 dd_free_record(struct dd_draw_record **record)
885 {
886 struct dd_draw_record *next = (*record)->next;
887
888 dd_unreference_copy_of_call(&(*record)->call);
889 dd_unreference_copy_of_draw_state(&(*record)->draw_state);
890 FREE((*record)->driver_state_log);
891 FREE(*record);
892 *record = next;
893 }
894
/* Write the hang report for one recorded draw call detected by the
 * pipelined hang-detection thread: sequence numbers, elapsed time, the
 * recorded call and state, the pre-captured shader log, and the device
 * status registers.
 */
static void
dd_dump_record(struct dd_context *dctx, struct dd_draw_record *record,
               uint32_t hw_sequence_no, int64_t now)
{
   FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                record->draw_state.base.apitrace_call_number);
   if (!f)
      return;

   fprintf(f, "Draw call sequence # = %u\n", record->sequence_no);
   fprintf(f, "HW reached sequence # = %u\n", hw_sequence_no);
   fprintf(f, "Elapsed time = %"PRIi64" ms\n\n",
           (now - record->timestamp) / 1000);

   dd_dump_call(f, &record->draw_state.base, &record->call);
   fprintf(f, "%s\n", record->driver_state_log);

   dctx->pipe->dump_debug_state(dctx->pipe, f,
                                PIPE_DUMP_DEVICE_STATUS_REGISTERS);
   dd_dump_dmesg(f);
   fclose(f);
}
917
/* Entry point of the pipelined hang-detection thread.
 *
 * Polls the record list every 10 ms: records whose fence value has been
 * reached by the HW are freed; a record that exceeds the configured
 * timeout triggers a hang report and kills the process.  dctx->mutex is
 * held while the list is inspected and released while sleeping.
 */
int
dd_thread_pipelined_hang_detect(void *input)
{
   struct dd_context *dctx = (struct dd_context *)input;
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);

   mtx_lock(&dctx->mutex);

   while (!dctx->kill_thread) {
      struct dd_draw_record **record = &dctx->records;

      /* Loop over all records. */
      while (*record) {
         int64_t now;

         /* If the fence has been signalled, release the record and all older
          * records.
          */
         if (*dctx->mapped_fence >= (*record)->sequence_no) {
            while (*record)
               dd_free_record(record);
            break;
         }

         /* The fence hasn't been signalled. Check the timeout. */
         now = os_time_get();
         if (os_time_timeout((*record)->timestamp,
                             (*record)->timestamp + dscreen->timeout_ms * 1000,
                             now)) {
            fprintf(stderr, "GPU hang detected.\n");

            /* Get the oldest unsignalled draw call. */
            while ((*record)->next &&
                   *dctx->mapped_fence < (*record)->next->sequence_no)
               record = &(*record)->next;

            dd_dump_record(dctx, *record, *dctx->mapped_fence, now);
            dd_kill_process();
         }

         record = &(*record)->next;
      }

      /* Unlock and sleep before starting all over again. */
      mtx_unlock(&dctx->mutex);
      os_time_sleep(10000); /* 10 ms */
      mtx_lock(&dctx->mutex);
   }

   /* Thread termination. */
   while (dctx->records)
      dd_free_record(&dctx->records);

   mtx_unlock(&dctx->mutex);
   return 0;
}
974
/* Capture the driver's current-shader dump into a malloc'ed string.
 * Returns NULL on allocation failure (callers already handle NULL).
 * On non-Linux platforms (no fmemopen) an empty string is returned.
 */
static char *
dd_get_driver_shader_log(struct dd_context *dctx)
{
#if defined(PIPE_OS_LINUX)
   FILE *f;
   char *buf;
   int written_bytes;

   if (!dctx->max_log_buffer_size)
      dctx->max_log_buffer_size = 16 * 1024;

   /* Keep increasing the buffer size until there is enough space.
    *
    * open_memstream can resize automatically, but it's VERY SLOW.
    * fmemopen is much faster.
    */
   while (1) {
      buf = malloc(dctx->max_log_buffer_size);
      /* Fix: malloc was used unchecked; buf[0] = 0 would crash on OOM. */
      if (!buf)
         return NULL;
      buf[0] = 0;

      f = fmemopen(buf, dctx->max_log_buffer_size, "a");
      if (!f) {
         free(buf);
         return NULL;
      }

      dd_dump_driver_state(dctx, f, PIPE_DUMP_CURRENT_SHADERS);
      written_bytes = ftell(f);
      fclose(f);

      /* Return if the backing buffer is large enough. */
      if (written_bytes < dctx->max_log_buffer_size - 1)
         break;

      /* Try again with a larger buffer. */
      free(buf);
      dctx->max_log_buffer_size *= 2;
   }

   return buf;
#else
   /* Return an empty string. */
   return (char*)calloc(1, 4);
#endif
}
1020
/* Record a draw call for the pipelined hang-detection thread: snapshot
 * the call, its state and the driver shader log, bump the GPU-written
 * sequence fence, and prepend the record to dctx->records.
 */
static void
dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
{
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record;
   char *log;

   /* Make a record of the draw call. */
   record = MALLOC_STRUCT(dd_draw_record);
   if (!record)
      return;

   /* Create the log. */
   log = dd_get_driver_shader_log(dctx);
   if (!log) {
      FREE(record);
      return;
   }

   /* Update the fence with the GPU.
    *
    * radeonsi/clear_buffer waits in the command processor until shaders are
    * idle before writing to memory. That's a necessary condition for isolating
    * draw calls.
    */
   dctx->sequence_no++;
   pipe->clear_buffer(pipe, dctx->fence, 0, 4, &dctx->sequence_no, 4);

   /* Initialize the record. */
   record->timestamp = os_time_get();
   record->sequence_no = dctx->sequence_no;
   record->driver_state_log = log;

   memset(&record->call, 0, sizeof(record->call));
   dd_copy_call(&record->call, call);

   dd_init_copy_of_draw_state(&record->draw_state);
   dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);

   /* Add the record to the list. */
   mtx_lock(&dctx->mutex);
   record->next = dctx->records;
   dctx->records = record;
   mtx_unlock(&dctx->mutex);
}
1066
1067 static void
1068 dd_context_flush(struct pipe_context *_pipe,
1069 struct pipe_fence_handle **fence, unsigned flags)
1070 {
1071 struct dd_context *dctx = dd_context(_pipe);
1072 struct pipe_context *pipe = dctx->pipe;
1073
1074 switch (dd_screen(dctx->base.screen)->mode) {
1075 case DD_DETECT_HANGS:
1076 dd_flush_and_handle_hang(dctx, fence, flags,
1077 "GPU hang detected in pipe->flush()");
1078 break;
1079 case DD_DETECT_HANGS_PIPELINED: /* nothing to do here */
1080 case DD_DUMP_ALL_CALLS:
1081 case DD_DUMP_APITRACE_CALL:
1082 pipe->flush(pipe, fence, flags);
1083 break;
1084 default:
1085 assert(0);
1086 }
1087 }
1088
1089 static void
1090 dd_before_draw(struct dd_context *dctx)
1091 {
1092 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1093
1094 if (dscreen->mode == DD_DETECT_HANGS &&
1095 !dscreen->no_flush &&
1096 dctx->num_draw_calls >= dscreen->skip_count)
1097 dd_flush_and_handle_hang(dctx, NULL, 0,
1098 "GPU hang most likely caused by internal "
1099 "driver commands");
1100 }
1101
1102 static void
1103 dd_after_draw(struct dd_context *dctx, struct dd_call *call)
1104 {
1105 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1106 struct pipe_context *pipe = dctx->pipe;
1107
1108 if (dctx->num_draw_calls >= dscreen->skip_count) {
1109 switch (dscreen->mode) {
1110 case DD_DETECT_HANGS:
1111 if (!dscreen->no_flush &&
1112 dd_flush_and_check_hang(dctx, NULL, 0)) {
1113 dd_write_report(dctx, call,
1114 PIPE_DUMP_DEVICE_STATUS_REGISTERS |
1115 PIPE_DUMP_CURRENT_STATES |
1116 PIPE_DUMP_CURRENT_SHADERS |
1117 PIPE_DUMP_LAST_COMMAND_BUFFER,
1118 true);
1119
1120 /* Terminate the process to prevent future hangs. */
1121 dd_kill_process();
1122 }
1123 break;
1124 case DD_DETECT_HANGS_PIPELINED:
1125 dd_pipelined_process_draw(dctx, call);
1126 break;
1127 case DD_DUMP_ALL_CALLS:
1128 if (!dscreen->no_flush)
1129 pipe->flush(pipe, NULL, 0);
1130 dd_write_report(dctx, call,
1131 PIPE_DUMP_CURRENT_STATES |
1132 PIPE_DUMP_CURRENT_SHADERS |
1133 PIPE_DUMP_LAST_COMMAND_BUFFER,
1134 false);
1135 break;
1136 case DD_DUMP_APITRACE_CALL:
1137 if (dscreen->apitrace_dump_call ==
1138 dctx->draw_state.apitrace_call_number) {
1139 dd_write_report(dctx, call,
1140 PIPE_DUMP_CURRENT_STATES |
1141 PIPE_DUMP_CURRENT_SHADERS,
1142 false);
1143 /* No need to continue. */
1144 exit(0);
1145 }
1146 break;
1147 default:
1148 assert(0);
1149 }
1150 }
1151
1152 ++dctx->num_draw_calls;
1153 if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
1154 fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
1155 dctx->num_draw_calls);
1156 }
1157
1158 static void
1159 dd_context_draw_vbo(struct pipe_context *_pipe,
1160 const struct pipe_draw_info *info)
1161 {
1162 struct dd_context *dctx = dd_context(_pipe);
1163 struct pipe_context *pipe = dctx->pipe;
1164 struct dd_call call;
1165
1166 call.type = CALL_DRAW_VBO;
1167 call.info.draw_vbo = *info;
1168
1169 dd_before_draw(dctx);
1170 pipe->draw_vbo(pipe, info);
1171 dd_after_draw(dctx, &call);
1172 }
1173
1174 static void
1175 dd_context_launch_grid(struct pipe_context *_pipe,
1176 const struct pipe_grid_info *info)
1177 {
1178 struct dd_context *dctx = dd_context(_pipe);
1179 struct pipe_context *pipe = dctx->pipe;
1180 struct dd_call call;
1181
1182 call.type = CALL_LAUNCH_GRID;
1183 call.info.launch_grid = *info;
1184
1185 dd_before_draw(dctx);
1186 pipe->launch_grid(pipe, info);
1187 dd_after_draw(dctx, &call);
1188 }
1189
1190 static void
1191 dd_context_resource_copy_region(struct pipe_context *_pipe,
1192 struct pipe_resource *dst, unsigned dst_level,
1193 unsigned dstx, unsigned dsty, unsigned dstz,
1194 struct pipe_resource *src, unsigned src_level,
1195 const struct pipe_box *src_box)
1196 {
1197 struct dd_context *dctx = dd_context(_pipe);
1198 struct pipe_context *pipe = dctx->pipe;
1199 struct dd_call call;
1200
1201 call.type = CALL_RESOURCE_COPY_REGION;
1202 call.info.resource_copy_region.dst = dst;
1203 call.info.resource_copy_region.dst_level = dst_level;
1204 call.info.resource_copy_region.dstx = dstx;
1205 call.info.resource_copy_region.dsty = dsty;
1206 call.info.resource_copy_region.dstz = dstz;
1207 call.info.resource_copy_region.src = src;
1208 call.info.resource_copy_region.src_level = src_level;
1209 call.info.resource_copy_region.src_box = *src_box;
1210
1211 dd_before_draw(dctx);
1212 pipe->resource_copy_region(pipe,
1213 dst, dst_level, dstx, dsty, dstz,
1214 src, src_level, src_box);
1215 dd_after_draw(dctx, &call);
1216 }
1217
1218 static void
1219 dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1220 {
1221 struct dd_context *dctx = dd_context(_pipe);
1222 struct pipe_context *pipe = dctx->pipe;
1223 struct dd_call call;
1224
1225 call.type = CALL_BLIT;
1226 call.info.blit = *info;
1227
1228 dd_before_draw(dctx);
1229 pipe->blit(pipe, info);
1230 dd_after_draw(dctx, &call);
1231 }
1232
1233 static boolean
1234 dd_context_generate_mipmap(struct pipe_context *_pipe,
1235 struct pipe_resource *res,
1236 enum pipe_format format,
1237 unsigned base_level,
1238 unsigned last_level,
1239 unsigned first_layer,
1240 unsigned last_layer)
1241 {
1242 struct dd_context *dctx = dd_context(_pipe);
1243 struct pipe_context *pipe = dctx->pipe;
1244 struct dd_call call;
1245 boolean result;
1246
1247 call.type = CALL_GENERATE_MIPMAP;
1248 call.info.generate_mipmap.res = res;
1249 call.info.generate_mipmap.format = format;
1250 call.info.generate_mipmap.base_level = base_level;
1251 call.info.generate_mipmap.last_level = last_level;
1252 call.info.generate_mipmap.first_layer = first_layer;
1253 call.info.generate_mipmap.last_layer = last_layer;
1254
1255 dd_before_draw(dctx);
1256 result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1257 first_layer, last_layer);
1258 dd_after_draw(dctx, &call);
1259 return result;
1260 }
1261
1262 static void
1263 dd_context_flush_resource(struct pipe_context *_pipe,
1264 struct pipe_resource *resource)
1265 {
1266 struct dd_context *dctx = dd_context(_pipe);
1267 struct pipe_context *pipe = dctx->pipe;
1268 struct dd_call call;
1269
1270 call.type = CALL_FLUSH_RESOURCE;
1271 call.info.flush_resource = resource;
1272
1273 dd_before_draw(dctx);
1274 pipe->flush_resource(pipe, resource);
1275 dd_after_draw(dctx, &call);
1276 }
1277
1278 static void
1279 dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
1280 const union pipe_color_union *color, double depth,
1281 unsigned stencil)
1282 {
1283 struct dd_context *dctx = dd_context(_pipe);
1284 struct pipe_context *pipe = dctx->pipe;
1285 struct dd_call call;
1286
1287 call.type = CALL_CLEAR;
1288 call.info.clear.buffers = buffers;
1289 call.info.clear.color = *color;
1290 call.info.clear.depth = depth;
1291 call.info.clear.stencil = stencil;
1292
1293 dd_before_draw(dctx);
1294 pipe->clear(pipe, buffers, color, depth, stencil);
1295 dd_after_draw(dctx, &call);
1296 }
1297
1298 static void
1299 dd_context_clear_render_target(struct pipe_context *_pipe,
1300 struct pipe_surface *dst,
1301 const union pipe_color_union *color,
1302 unsigned dstx, unsigned dsty,
1303 unsigned width, unsigned height,
1304 bool render_condition_enabled)
1305 {
1306 struct dd_context *dctx = dd_context(_pipe);
1307 struct pipe_context *pipe = dctx->pipe;
1308 struct dd_call call;
1309
1310 call.type = CALL_CLEAR_RENDER_TARGET;
1311
1312 dd_before_draw(dctx);
1313 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
1314 render_condition_enabled);
1315 dd_after_draw(dctx, &call);
1316 }
1317
1318 static void
1319 dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1320 struct pipe_surface *dst, unsigned clear_flags,
1321 double depth, unsigned stencil, unsigned dstx,
1322 unsigned dsty, unsigned width, unsigned height,
1323 bool render_condition_enabled)
1324 {
1325 struct dd_context *dctx = dd_context(_pipe);
1326 struct pipe_context *pipe = dctx->pipe;
1327 struct dd_call call;
1328
1329 call.type = CALL_CLEAR_DEPTH_STENCIL;
1330
1331 dd_before_draw(dctx);
1332 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1333 dstx, dsty, width, height,
1334 render_condition_enabled);
1335 dd_after_draw(dctx, &call);
1336 }
1337
1338 static void
1339 dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1340 unsigned offset, unsigned size,
1341 const void *clear_value, int clear_value_size)
1342 {
1343 struct dd_context *dctx = dd_context(_pipe);
1344 struct pipe_context *pipe = dctx->pipe;
1345 struct dd_call call;
1346
1347 call.type = CALL_CLEAR_BUFFER;
1348 call.info.clear_buffer.res = res;
1349 call.info.clear_buffer.offset = offset;
1350 call.info.clear_buffer.size = size;
1351 call.info.clear_buffer.clear_value = clear_value;
1352 call.info.clear_buffer.clear_value_size = clear_value_size;
1353
1354 dd_before_draw(dctx);
1355 pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1356 dd_after_draw(dctx, &call);
1357 }
1358
1359 static void
1360 dd_context_clear_texture(struct pipe_context *_pipe,
1361 struct pipe_resource *res,
1362 unsigned level,
1363 const struct pipe_box *box,
1364 const void *data)
1365 {
1366 struct dd_context *dctx = dd_context(_pipe);
1367 struct pipe_context *pipe = dctx->pipe;
1368 struct dd_call call;
1369
1370 call.type = CALL_CLEAR_TEXTURE;
1371
1372 dd_before_draw(dctx);
1373 pipe->clear_texture(pipe, res, level, box, data);
1374 dd_after_draw(dctx, &call);
1375 }
1376
1377 void
1378 dd_init_draw_functions(struct dd_context *dctx)
1379 {
1380 CTX_INIT(flush);
1381 CTX_INIT(draw_vbo);
1382 CTX_INIT(launch_grid);
1383 CTX_INIT(resource_copy_region);
1384 CTX_INIT(blit);
1385 CTX_INIT(clear);
1386 CTX_INIT(clear_render_target);
1387 CTX_INIT(clear_depth_stencil);
1388 CTX_INIT(clear_buffer);
1389 CTX_INIT(clear_texture);
1390 CTX_INIT(flush_resource);
1391 CTX_INIT(generate_mipmap);
1392 }