ddebug: don't use fmemopen on non-Linux OS
[mesa.git] / src / gallium / drivers / ddebug / dd_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "dd_pipe.h"
29
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "os/os_time.h"
39 #include <inttypes.h>
40
41
42 static FILE *
43 dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
44 {
45 struct pipe_screen *screen = dscreen->screen;
46 FILE *f = dd_get_debug_file(dscreen->verbose);
47 if (!f)
48 return NULL;
49
50 fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
51 fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
52 fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
53
54 if (apitrace_call_number)
55 fprintf(f, "Last apitrace call: %u\n\n",
56 apitrace_call_number);
57 return f;
58 }
59
/* Append the tail of the kernel log (last 60 dmesg lines) to stream f.
 * Silently does nothing when the dmesg pipeline cannot be spawned.
 */
static void
dd_dump_dmesg(FILE *f)
{
   char buf[2000];
   FILE *cmd = popen("dmesg | tail -n60", "r");

   if (cmd == NULL)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(buf, sizeof(buf), cmd) != NULL)
      fputs(buf, f);

   pclose(cmd);
}
75
/* Counterpart of dd_get_file_stream(): close the debug log stream. */
76 static void
77 dd_close_file_stream(FILE *f)
78 {
79    fclose(f);
80 }
81
/* Return how many viewports the currently-bound shaders can address.
 *
 * The viewport index is written by the last geometry stage before the
 * rasterizer, so GS takes precedence over TES over VS. If none of those
 * stages is bound, only viewport 0 can be active.
 */
82 static unsigned
83 dd_num_active_viewports(struct dd_draw_state *dstate)
84 {
85    struct tgsi_shader_info info;
86    const struct tgsi_token *tokens;
87
88    if (dstate->shaders[PIPE_SHADER_GEOMETRY])
89       tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
90    else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
91       tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
92    else if (dstate->shaders[PIPE_SHADER_VERTEX])
93       tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
94    else
95       return 1;
96
   /* Scan the shader to see whether it writes the viewport index output. */
97    tgsi_scan_shader(tokens, &info);
98    return info.writes_viewport_index ? PIPE_MAX_VIEWPORTS : 1;
99 }
100
/* ANSI escape sequences used to colorize the state dump in a terminal. */
101 #define COLOR_RESET	"\033[0m"
102 #define COLOR_SHADER	"\033[1;32m"
103 #define COLOR_STATE	"\033[1;33m"
104
/* Dump a named state object as "name: <value>" via util_dump_<name>().
 * All of these macros expect a FILE *f in the enclosing scope.
 */
105 #define DUMP(name, var) do { \
106    fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
107    util_dump_##name(f, var); \
108    fprintf(f, "\n"); \
109 } while(0)
110
/* Like DUMP, but with an index i: "name i: <value>". */
111 #define DUMP_I(name, var, i) do { \
112    fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
113    util_dump_##name(f, var); \
114    fprintf(f, "\n"); \
115 } while(0)
116
/* Dump a struct member by value: " member: <value>". */
117 #define DUMP_M(name, var, member) do { \
118    fprintf(f, " " #member ": "); \
119    util_dump_##name(f, (var)->member); \
120    fprintf(f, "\n"); \
121 } while(0)
122
/* Dump a struct member by address, for dumpers that take a pointer. */
123 #define DUMP_M_ADDR(name, var, member) do { \
124    fprintf(f, " " #member ": "); \
125    util_dump_##name(f, &(var)->member); \
126    fprintf(f, "\n"); \
127 } while(0)
128
/* Print "name = value" (decimal), with the name highlighted. */
129 static void
130 print_named_value(FILE *f, const char *name, int value)
131 {
132    fprintf(f, COLOR_STATE "%s" COLOR_RESET " = %i\n", name, value);
133 }
134
/* Print "name = 0xXXXXXXXX" (8-digit hex), with the name highlighted. */
135 static void
136 print_named_xvalue(FILE *f, const char *name, int value)
137 {
138    fprintf(f, COLOR_STATE "%s" COLOR_RESET " = 0x%08x\n", name, value);
139 }
140
/* Write an unsigned integer in decimal form to the given stream. */
static void
util_dump_uint(FILE *f, unsigned value)
{
   fprintf(f, "%u", value);
}
146
/* Write an unsigned integer as "0x"-prefixed lowercase hex to the stream. */
static void
util_dump_hex(FILE *f, unsigned value)
{
   fprintf(f, "0x%x", value);
}
152
/* Write a double using the default "%f" formatting (6 decimal places). */
static void
util_dump_double(FILE *f, double value)
{
   fprintf(f, "%f", value);
}
158
159 static void
160 util_dump_format(FILE *f, enum pipe_format format)
161 {
162 fprintf(f, "%s", util_format_name(format));
163 }
164
165 static void
166 util_dump_color_union(FILE *f, const union pipe_color_union *color)
167 {
168 fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
169 color->f[0], color->f[1], color->f[2], color->f[3],
170 color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
171 }
172
173 static void
174 util_dump_query(FILE *f, struct dd_query *query)
175 {
176 if (query->type >= PIPE_QUERY_DRIVER_SPECIFIC)
177 fprintf(f, "PIPE_QUERY_DRIVER_SPECIFIC + %i",
178 query->type - PIPE_QUERY_DRIVER_SPECIFIC);
179 else
180 fprintf(f, "%s", util_dump_query_type(query->type, false));
181 }
182
183 static void
184 dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
185 {
186 if (dstate->render_cond.query) {
187 fprintf(f, "render condition:\n");
188 DUMP_M(query, &dstate->render_cond, query);
189 DUMP_M(uint, &dstate->render_cond, condition);
190 DUMP_M(uint, &dstate->render_cond, mode);
191 fprintf(f, "\n");
192 }
193 }
194
/* Dump a draw_vbo call together with all currently-bound draw state:
 * index/vertex buffers, stream-output targets, per-stage shaders and their
 * resources, rasterizer/viewport/scissor state, DSA, blend and framebuffer.
 * Output ordering is part of the log format; don't reorder.
 */
195 static void
196 dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
197 {
198    int sh, i;
199    const char *shader_str[PIPE_SHADER_TYPES];
200
201    shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
202    shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
203    shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
204    shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
205    shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
206    shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";
207
   /* The draw parameters plus the buffers the draw sources from. */
208    DUMP(draw_info, info);
209    if (info->indexed) {
210       DUMP(index_buffer, &dstate->index_buffer);
211       if (dstate->index_buffer.buffer)
212          DUMP_M(resource, &dstate->index_buffer, buffer);
213    }
214    if (info->count_from_stream_output)
215       DUMP_M(stream_output_target, info,
216              count_from_stream_output);
217    if (info->indirect)
218       DUMP_M(resource, info, indirect);
219    fprintf(f, "\n");
220
221    /* TODO: dump active queries */
222
223    dd_dump_render_condition(dstate, f);
224
   /* Only slots with a real or user buffer bound are printed. */
225    for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
226       if (dstate->vertex_buffers[i].buffer ||
227           dstate->vertex_buffers[i].user_buffer) {
228          DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
229          if (dstate->vertex_buffers[i].buffer)
230             DUMP_M(resource, &dstate->vertex_buffers[i], buffer);
231       }
232
233    if (dstate->velems) {
234       print_named_value(f, "num vertex elements",
235                         dstate->velems->state.velems.count);
236       for (i = 0; i < dstate->velems->state.velems.count; i++) {
237          fprintf(f, " ");
238          DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
239       }
240    }
241
242    print_named_value(f, "num stream output targets", dstate->num_so_targets);
243    for (i = 0; i < dstate->num_so_targets; i++)
244       if (dstate->so_targets[i]) {
245          DUMP_I(stream_output_target, dstate->so_targets[i], i);
246          DUMP_M(resource, dstate->so_targets[i], buffer);
247          fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
248       }
249
250    fprintf(f, "\n");
   /* Per-stage state; the compute stage is skipped for draws. */
251    for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
252       if (sh == PIPE_SHADER_COMPUTE)
253          continue;
254
      /* No TCS bound but TES is: fixed-function default tess levels apply. */
255       if (sh == PIPE_SHADER_TESS_CTRL &&
256           !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
257           dstate->shaders[PIPE_SHADER_TESS_EVAL])
258          fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
259                  "default_inner_level = {%f, %f}}\n",
260                  dstate->tess_default_levels[0],
261                  dstate->tess_default_levels[1],
262                  dstate->tess_default_levels[2],
263                  dstate->tess_default_levels[3],
264                  dstate->tess_default_levels[4],
265                  dstate->tess_default_levels[5]);
266
      /* Rasterizer-related state is printed just before the FS. */
267       if (sh == PIPE_SHADER_FRAGMENT)
268          if (dstate->rs) {
269             unsigned num_viewports = dd_num_active_viewports(dstate);
270
271             if (dstate->rs->state.rs.clip_plane_enable)
272                DUMP(clip_state, &dstate->clip_state);
273
274             for (i = 0; i < num_viewports; i++)
275                DUMP_I(viewport_state, &dstate->viewports[i], i);
276
277             if (dstate->rs->state.rs.scissor)
278                for (i = 0; i < num_viewports; i++)
279                   DUMP_I(scissor_state, &dstate->scissors[i], i);
280
281             DUMP(rasterizer_state, &dstate->rs->state.rs);
282
283             if (dstate->rs->state.rs.poly_stipple_enable)
284                DUMP(poly_stipple, &dstate->polygon_stipple);
285             fprintf(f, "\n");
286          }
287
288       if (!dstate->shaders[sh])
289          continue;
290
291       fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
292       DUMP(shader_state, &dstate->shaders[sh]->state.shader);
293
294       for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
295          if (dstate->constant_buffers[sh][i].buffer ||
296              dstate->constant_buffers[sh][i].user_buffer) {
297             DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
298             if (dstate->constant_buffers[sh][i].buffer)
299                DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
300          }
301
302       for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
303          if (dstate->sampler_states[sh][i])
304             DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);
305
306       for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
307          if (dstate->sampler_views[sh][i]) {
308             DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
309             DUMP_M(resource, dstate->sampler_views[sh][i], texture);
310          }
311
312       /* TODO: print shader images */
313       /* TODO: print shader buffers */
314
315       fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
316    }
317
   /* Output-merger state. */
318    if (dstate->dsa)
319       DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
320    DUMP(stencil_ref, &dstate->stencil_ref);
321
322    if (dstate->blend)
323       DUMP(blend_state, &dstate->blend->state.blend);
324    DUMP(blend_color, &dstate->blend_color);
325
326    print_named_value(f, "min_samples", dstate->min_samples);
327    print_named_xvalue(f, "sample_mask", dstate->sample_mask);
328    fprintf(f, "\n");
329
330    DUMP(framebuffer_state, &dstate->framebuffer_state);
331    for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
332       if (dstate->framebuffer_state.cbufs[i]) {
333          fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
334          DUMP(surface, dstate->framebuffer_state.cbufs[i]);
335          fprintf(f, " ");
336          DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
337       }
338    if (dstate->framebuffer_state.zsbuf) {
339       fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
340       DUMP(surface, dstate->framebuffer_state.zsbuf);
341       fprintf(f, " ");
342       DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
343    }
344    fprintf(f, "\n");
345 }
346
/* Dump a launch_grid (compute dispatch) call.
 * Only the call name is printed; grid parameters are not dumped yet.
 */
static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
{
   fputs("launch_grid:\n", f);
   /* TODO */
}
353
/* Dump a resource_copy_region call: destination location, source location
 * and the copied box.
 */
354 static void
355 dd_dump_resource_copy_region(struct dd_draw_state *dstate,
356                              struct call_resource_copy_region *info,
357                              FILE *f)
358 {
   /* __func__+8 strips the leading "dd_dump_" from the printed call name. */
359    fprintf(f, "%s:\n", __func__+8);
360    DUMP_M(resource, info, dst);
361    DUMP_M(uint, info, dst_level);
362    DUMP_M(uint, info, dstx);
363    DUMP_M(uint, info, dsty);
364    DUMP_M(uint, info, dstz);
365    DUMP_M(resource, info, src);
366    DUMP_M(uint, info, src_level);
367    DUMP_M_ADDR(box, info, src_box);
368 }
369
/* Dump a blit call: destination, source, mask/filter/scissor parameters,
 * and the render condition if the blit honors it.
 */
370 static void
371 dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
372 {
   /* __func__+8 strips the leading "dd_dump_" from the printed call name. */
373    fprintf(f, "%s:\n", __func__+8);
374    DUMP_M(resource, info, dst.resource);
375    DUMP_M(uint, info, dst.level);
376    DUMP_M_ADDR(box, info, dst.box);
377    DUMP_M(format, info, dst.format);
378
379    DUMP_M(resource, info, src.resource);
380    DUMP_M(uint, info, src.level);
381    DUMP_M_ADDR(box, info, src.box);
382    DUMP_M(format, info, src.format);
383
384    DUMP_M(hex, info, mask);
385    DUMP_M(uint, info, filter);
386    DUMP_M(uint, info, scissor_enable);
387    DUMP_M_ADDR(scissor_state, info, scissor);
388    DUMP_M(uint, info, render_condition_enable);
389
390    if (info->render_condition_enable)
391       dd_dump_render_condition(dstate, f);
392 }
393
/* Dump a generate_mipmap call. Parameters are not printed yet. */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fputs("generate_mipmap:\n", f);
   /* TODO */
}
400
401 static void
402 dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
403 FILE *f)
404 {
405 fprintf(f, "%s:\n", __func__+8);
406 DUMP(resource, res);
407 }
408
/* Dump a clear call: which buffers are cleared and with what values. */
409 static void
410 dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
411 {
   /* __func__+8 strips the leading "dd_dump_" from the printed call name. */
412    fprintf(f, "%s:\n", __func__+8);
413    DUMP_M(uint, info, buffers);
414    DUMP_M_ADDR(color_union, info, color);
415    DUMP_M(double, info, depth);
416    DUMP_M(hex, info, stencil);
417 }
418
419 static void
420 dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
421 FILE *f)
422 {
423 int i;
424 const char *value = (const char*)info->clear_value;
425
426 fprintf(f, "%s:\n", __func__+8);
427 DUMP_M(resource, info, res);
428 DUMP_M(uint, info, offset);
429 DUMP_M(uint, info, size);
430 DUMP_M(uint, info, clear_value_size);
431
432 fprintf(f, " clear_value:");
433 for (i = 0; i < info->clear_value_size; i++)
434 fprintf(f, " %02x", value[i]);
435 fprintf(f, "\n");
436 }
437
/* Dump a clear_render_target call. Parameters are not printed yet. */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fputs("clear_render_target:\n", f);
   /* TODO */
}
444
/* Dump a clear_depth_stencil call. Parameters are not printed yet. */
static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fputs("clear_depth_stencil:\n", f);
   /* TODO */
}
451
452 static void
453 dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
454 {
455 if (dctx->pipe->dump_debug_state) {
456 fprintf(f,"\n\n**************************************************"
457 "***************************\n");
458 fprintf(f, "Driver-specific state:\n\n");
459 dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
460 }
461 }
462
/* Dispatch to the dumper matching the recorded call type. */
463 static void
464 dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
465 {
466    switch (call->type) {
467    case CALL_DRAW_VBO:
468       dd_dump_draw_vbo(state, &call->info.draw_vbo, f);
469       break;
470    case CALL_LAUNCH_GRID:
471       dd_dump_launch_grid(state, &call->info.launch_grid, f);
472       break;
473    case CALL_RESOURCE_COPY_REGION:
474       dd_dump_resource_copy_region(state,
475                                    &call->info.resource_copy_region, f);
476       break;
477    case CALL_BLIT:
478       dd_dump_blit(state, &call->info.blit, f);
479       break;
480    case CALL_FLUSH_RESOURCE:
481       dd_dump_flush_resource(state, call->info.flush_resource, f);
482       break;
483    case CALL_CLEAR:
484       dd_dump_clear(state, &call->info.clear, f);
485       break;
486    case CALL_CLEAR_BUFFER:
487       dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
488       break;
489    case CALL_CLEAR_RENDER_TARGET:
490       dd_dump_clear_render_target(state, f);
491       break;
492    case CALL_CLEAR_DEPTH_STENCIL:
493       dd_dump_clear_depth_stencil(state, f);
494       break;
495    case CALL_GENERATE_MIPMAP:
496       dd_dump_generate_mipmap(state, f);
497       break;
498    }
499 }
500
501 static void
502 dd_write_report(struct dd_context *dctx, struct dd_call *call, unsigned flags,
503 bool dump_dmesg)
504 {
505 FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
506 dctx->draw_state.apitrace_call_number);
507
508 if (!f)
509 return;
510
511 dd_dump_call(f, &dctx->draw_state, call);
512 dd_dump_driver_state(dctx, f, flags);
513 if (dump_dmesg)
514 dd_dump_dmesg(f);
515 dd_close_file_stream(f);
516 }
517
/* Abort the process after a GPU hang so no further work is submitted.
 * sync() and the fflush calls run first so the just-written log and any
 * buffered stdio output actually reach disk before exit.
 */
518 static void
519 dd_kill_process(void)
520 {
521    sync();
522    fprintf(stderr, "dd: Aborting the process...\n");
523    fflush(stdout);
524    fflush(stderr);
525    exit(1);
526 }
527
/* Flush the wrapped context and wait on the resulting fence.
 *
 * \param flush_fence  if non-NULL, receives a reference to the flush fence
 * \param flush_flags  passed straight to pipe->flush
 * \return true if the fence did not signal within the configured timeout
 *         (i.e. a hang is assumed), false when the GPU went idle or no
 *         fence was returned.
 */
528 static bool
529 dd_flush_and_check_hang(struct dd_context *dctx,
530                         struct pipe_fence_handle **flush_fence,
531                         unsigned flush_flags)
532 {
533    struct pipe_fence_handle *fence = NULL;
534    struct pipe_context *pipe = dctx->pipe;
535    struct pipe_screen *screen = pipe->screen;
536    uint64_t timeout_ms = dd_screen(dctx->base.screen)->timeout_ms;
537    bool idle;
538
539    assert(timeout_ms > 0);
540
541    pipe->flush(pipe, &fence, flush_flags);
542    if (flush_fence)
543       screen->fence_reference(screen, flush_fence, fence);
544    if (!fence)
545       return false;
546
   /* timeout is in ms; fence_finish takes nanoseconds here (* 1e6). */
547    idle = screen->fence_finish(screen, fence, timeout_ms * 1000000);
548    screen->fence_reference(screen, &fence, NULL);
549    if (!idle)
550       fprintf(stderr, "dd: GPU hang detected!\n");
551    return !idle;
552 }
553
/* Flush; if a hang is detected, write a full report (driver state + dmesg)
 * with the given cause string and abort the process. Returns normally only
 * when no hang occurred.
 */
554 static void
555 dd_flush_and_handle_hang(struct dd_context *dctx,
556                          struct pipe_fence_handle **fence, unsigned flags,
557                          const char *cause)
558 {
559    if (dd_flush_and_check_hang(dctx, fence, flags)) {
560       FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
561                                    dctx->draw_state.apitrace_call_number);
562
563       if (f) {
564          fprintf(f, "dd: %s.\n", cause);
565          dd_dump_driver_state(dctx, f,
566                               PIPE_DUMP_DEVICE_STATUS_REGISTERS |
567                               PIPE_DUMP_CURRENT_STATES |
568                               PIPE_DUMP_CURRENT_SHADERS |
569                               PIPE_DUMP_LAST_COMMAND_BUFFER);
570          dd_dump_dmesg(f);
571          dd_close_file_stream(f);
572       }
573
574       /* Terminate the process to prevent future hangs. */
575       dd_kill_process();
576    }
577 }
578
/* Drop the references a dd_copy_call'ed record holds on gallium objects.
 * Only pointer members take references; plain-value calls (clear, etc.)
 * have nothing to release.
 */
579 static void
580 dd_unreference_copy_of_call(struct dd_call *dst)
581 {
582    switch (dst->type) {
583    case CALL_DRAW_VBO:
584       pipe_so_target_reference(&dst->info.draw_vbo.count_from_stream_output, NULL);
585       pipe_resource_reference(&dst->info.draw_vbo.indirect, NULL);
586       pipe_resource_reference(&dst->info.draw_vbo.indirect_params, NULL);
587       break;
588    case CALL_LAUNCH_GRID:
589       pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
590       break;
591    case CALL_RESOURCE_COPY_REGION:
592       pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
593       pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
594       break;
595    case CALL_BLIT:
596       pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
597       pipe_resource_reference(&dst->info.blit.src.resource, NULL);
598       break;
599    case CALL_FLUSH_RESOURCE:
600       pipe_resource_reference(&dst->info.flush_resource, NULL);
601       break;
602    case CALL_CLEAR:
603       break;
604    case CALL_CLEAR_BUFFER:
605       pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
606       break;
607    case CALL_CLEAR_RENDER_TARGET:
608       break;
609    case CALL_CLEAR_DEPTH_STENCIL:
610       break;
611    case CALL_GENERATE_MIPMAP:
612       pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
613       break;
614    }
615 }
616
/* Deep-copy a call into a record. For each refcounted member, a reference
 * is taken first via pipe_*_reference (which also stores src's pointer in
 * dst); the following struct assignment then copies the remaining plain
 * fields and rewrites the pointer members with the same values, so the
 * refcounts stay consistent.
 */
617 static void
618 dd_copy_call(struct dd_call *dst, struct dd_call *src)
619 {
620    dst->type = src->type;
621
622    switch (src->type) {
623    case CALL_DRAW_VBO:
624       pipe_so_target_reference(&dst->info.draw_vbo.count_from_stream_output,
625                                src->info.draw_vbo.count_from_stream_output);
626       pipe_resource_reference(&dst->info.draw_vbo.indirect,
627                               src->info.draw_vbo.indirect);
628       pipe_resource_reference(&dst->info.draw_vbo.indirect_params,
629                               src->info.draw_vbo.indirect_params);
630       dst->info.draw_vbo = src->info.draw_vbo;
631       break;
632    case CALL_LAUNCH_GRID:
633       pipe_resource_reference(&dst->info.launch_grid.indirect,
634                               src->info.launch_grid.indirect);
635       dst->info.launch_grid = src->info.launch_grid;
636       break;
637    case CALL_RESOURCE_COPY_REGION:
638       pipe_resource_reference(&dst->info.resource_copy_region.dst,
639                               src->info.resource_copy_region.dst);
640       pipe_resource_reference(&dst->info.resource_copy_region.src,
641                               src->info.resource_copy_region.src);
642       dst->info.resource_copy_region = src->info.resource_copy_region;
643       break;
644    case CALL_BLIT:
645       pipe_resource_reference(&dst->info.blit.dst.resource,
646                               src->info.blit.dst.resource);
647       pipe_resource_reference(&dst->info.blit.src.resource,
648                               src->info.blit.src.resource);
649       dst->info.blit = src->info.blit;
650       break;
651    case CALL_FLUSH_RESOURCE:
652       pipe_resource_reference(&dst->info.flush_resource,
653                               src->info.flush_resource);
654       break;
655    case CALL_CLEAR:
656       dst->info.clear = src->info.clear;
657       break;
658    case CALL_CLEAR_BUFFER:
659       pipe_resource_reference(&dst->info.clear_buffer.res,
660                               src->info.clear_buffer.res);
661       dst->info.clear_buffer = src->info.clear_buffer;
662       break;
663    case CALL_CLEAR_RENDER_TARGET:
664       break;
665    case CALL_CLEAR_DEPTH_STENCIL:
666       break;
667    case CALL_GENERATE_MIPMAP:
668       pipe_resource_reference(&dst->info.generate_mipmap.res,
669                               src->info.generate_mipmap.res);
670       dst->info.generate_mipmap = src->info.generate_mipmap;
671       break;
672    }
673 }
674
/* Prepare a draw-state copy for use with dd_copy_draw_state: zero all
 * gallium-object pointer arrays (so the first reference calls see NULL)
 * and point the base state's CSO/query pointers at the copy's embedded
 * storage.
 */
675 static void
676 dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
677 {
678    unsigned i,j;
679
680    /* Just clear pointers to gallium objects. Don't clear the whole structure,
681     * because it would kill performance with its size of 130 KB.
682     */
683    memset(&state->base.index_buffer, 0,
684           sizeof(state->base.index_buffer));
685    memset(state->base.vertex_buffers, 0,
686           sizeof(state->base.vertex_buffers));
687    memset(state->base.so_targets, 0,
688           sizeof(state->base.so_targets));
689    memset(state->base.constant_buffers, 0,
690           sizeof(state->base.constant_buffers));
691    memset(state->base.sampler_views, 0,
692           sizeof(state->base.sampler_views));
693    memset(state->base.shader_images, 0,
694           sizeof(state->base.shader_images));
695    memset(state->base.shader_buffers, 0,
696           sizeof(state->base.shader_buffers));
697    memset(&state->base.framebuffer_state, 0,
698           sizeof(state->base.framebuffer_state));
699
700    memset(state->shaders, 0, sizeof(state->shaders));
701
   /* Non-pointer state is stored inline in the copy; aim base at it. */
702    state->base.render_cond.query = &state->render_cond;
703
704    for (i = 0; i < PIPE_SHADER_TYPES; i++) {
705       state->base.shaders[i] = &state->shaders[i];
706       for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
707          state->base.sampler_states[i][j] = &state->sampler_states[i][j];
708    }
709
710    state->base.velems = &state->velems;
711    state->base.rs = &state->rs;
712    state->base.dsa = &state->dsa;
713    state->base.blend = &state->blend;
714 }
715
/* Release everything a draw-state copy holds: buffer/view/target
 * references, duplicated TGSI tokens and the framebuffer surfaces.
 */
716 static void
717 dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
718 {
719    struct dd_draw_state *dst = &state->base;
720    unsigned i,j;
721
722    util_set_index_buffer(&dst->index_buffer, NULL);
723
724    for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
725       pipe_resource_reference(&dst->vertex_buffers[i].buffer, NULL);
726    for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
727       pipe_so_target_reference(&dst->so_targets[i], NULL);
728
729    for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* Tokens were duplicated by dd_copy_draw_state (tgsi_dup_tokens). */
730       if (dst->shaders[i])
731          tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);
732
733       for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
734          pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
735       for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
736          pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
737       for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
738          pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
739       for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
740          pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
741    }
742
743    util_unreference_framebuffer_state(&dst->framebuffer_state);
744 }
745
746 static void
747 dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
748 {
749 unsigned i,j;
750
751 if (src->render_cond.query) {
752 *dst->render_cond.query = *src->render_cond.query;
753 dst->render_cond.condition = src->render_cond.condition;
754 dst->render_cond.mode = src->render_cond.mode;
755 } else {
756 dst->render_cond.query = NULL;
757 }
758
759 util_set_index_buffer(&dst->index_buffer, &src->index_buffer);
760
761 for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
762 pipe_resource_reference(&dst->vertex_buffers[i].buffer,
763 src->vertex_buffers[i].buffer);
764 memcpy(&dst->vertex_buffers[i], &src->vertex_buffers[i],
765 sizeof(src->vertex_buffers[i]));
766 }
767
768 dst->num_so_targets = src->num_so_targets;
769 for (i = 0; i < ARRAY_SIZE(src->so_targets); i++)
770 pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
771 memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
772
773 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
774 if (!src->shaders[i]) {
775 dst->shaders[i] = NULL;
776 continue;
777 }
778
779 if (src->shaders[i]) {
780 dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
781 dst->shaders[i]->state.shader.tokens =
782 tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
783 } else {
784 dst->shaders[i] = NULL;
785 }
786
787 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
788 pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
789 src->constant_buffers[i][j].buffer);
790 memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
791 sizeof(src->constant_buffers[i][j]));
792 }
793
794 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
795 pipe_sampler_view_reference(&dst->sampler_views[i][j],
796 src->sampler_views[i][j]);
797 if (src->sampler_states[i][j])
798 dst->sampler_states[i][j]->state.sampler =
799 src->sampler_states[i][j]->state.sampler;
800 else
801 dst->sampler_states[i][j] = NULL;
802 }
803 /* TODO: shader buffers & images */
804 }
805
806 if (src->velems)
807 dst->velems->state.velems = src->velems->state.velems;
808 else
809 dst->velems = NULL;
810
811 if (src->rs)
812 dst->rs->state.rs = src->rs->state.rs;
813 else
814 dst->rs = NULL;
815
816 if (src->dsa)
817 dst->dsa->state.dsa = src->dsa->state.dsa;
818 else
819 dst->dsa = NULL;
820
821 if (src->blend)
822 dst->blend->state.blend = src->blend->state.blend;
823 else
824 dst->blend = NULL;
825
826 dst->blend_color = src->blend_color;
827 dst->stencil_ref = src->stencil_ref;
828 dst->sample_mask = src->sample_mask;
829 dst->min_samples = src->min_samples;
830 dst->clip_state = src->clip_state;
831 util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
832 memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
833 memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
834 memcpy(dst->tess_default_levels, src->tess_default_levels,
835 sizeof(src->tess_default_levels));
836 dst->apitrace_call_number = src->apitrace_call_number;
837 }
838
839 static void
840 dd_free_record(struct dd_draw_record **record)
841 {
842 struct dd_draw_record *next = (*record)->next;
843
844 dd_unreference_copy_of_call(&(*record)->call);
845 dd_unreference_copy_of_draw_state(&(*record)->draw_state);
846 FREE((*record)->driver_state_log);
847 FREE(*record);
848 *record = next;
849 }
850
/* Write a hang report for one pipelined record: sequence numbers, elapsed
 * time, the recorded call + state, the saved shader log, device registers
 * and the dmesg tail.
 */
851 static void
852 dd_dump_record(struct dd_context *dctx, struct dd_draw_record *record,
853                uint32_t hw_sequence_no, int64_t now)
854 {
855    FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
856                                 record->draw_state.base.apitrace_call_number);
857    if (!f)
858       return;
859
860    fprintf(f, "Draw call sequence # = %u\n", record->sequence_no);
861    fprintf(f, "HW reached sequence # = %u\n", hw_sequence_no);
   /* Timestamps come from os_time_get(); /1000 to print milliseconds
    * (assumes microsecond timestamps -- matches os_time usage here). */
862    fprintf(f, "Elapsed time = %"PRIi64" ms\n\n",
863            (now - record->timestamp) / 1000);
864
865    dd_dump_call(f, &record->draw_state.base, &record->call);
866    fprintf(f, "%s\n", record->driver_state_log);
867
868    dctx->pipe->dump_debug_state(dctx->pipe, f,
869                                 PIPE_DUMP_DEVICE_STATUS_REGISTERS);
870    dd_dump_dmesg(f);
871    fclose(f);
872 }
873
/* Watchdog thread for DD_DETECT_HANGS_PIPELINED mode: polls the list of
 * in-flight draw records against the GPU-written fence value, frees
 * signalled records, and on timeout dumps the oldest unsignalled record
 * and aborts. Holds dctx->mutex except while sleeping.
 */
874 PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
875 {
876    struct dd_context *dctx = (struct dd_context *)input;
877    struct dd_screen *dscreen = dd_screen(dctx->base.screen);
878
879    pipe_mutex_lock(dctx->mutex);
880
881    while (!dctx->kill_thread) {
882       struct dd_draw_record **record = &dctx->records;
883
884       /* Loop over all records. */
885       while (*record) {
886          int64_t now;
887
888          /* If the fence has been signalled, release the record and all older
889           * records.
890           */
891          if (*dctx->mapped_fence >= (*record)->sequence_no) {
892             while (*record)
893                dd_free_record(record);
894             break;
895          }
896
897          /* The fence hasn't been signalled. Check the timeout. */
898          now = os_time_get();
899          if (os_time_timeout((*record)->timestamp,
900                              (*record)->timestamp + dscreen->timeout_ms * 1000,
901                              now)) {
902             fprintf(stderr, "GPU hang detected.\n");
903
904             /* Get the oldest unsignalled draw call. */
905             while ((*record)->next &&
906                    *dctx->mapped_fence < (*record)->next->sequence_no)
907                record = &(*record)->next;
908
            /* Does not return: dd_kill_process() exits the process. */
909             dd_dump_record(dctx, *record, *dctx->mapped_fence, now);
910             dd_kill_process();
911          }
912
913          record = &(*record)->next;
914       }
915
916       /* Unlock and sleep before starting all over again. */
917       pipe_mutex_unlock(dctx->mutex);
918       os_time_sleep(10000); /* 10 ms */
919       pipe_mutex_lock(dctx->mutex);
920    }
921
922    /* Thread termination. */
923    while (dctx->records)
924       dd_free_record(&dctx->records);
925
926    pipe_mutex_unlock(dctx->mutex);
927    return 0;
928 }
929
/* Capture the driver's current shader state (PIPE_DUMP_CURRENT_SHADERS)
 * into a newly-allocated, NUL-terminated string.
 *
 * The caller owns the returned buffer and must free() it.
 * Returns NULL on allocation/stream failure. On non-Linux platforms
 * (no fmemopen) an empty string is returned instead.
 */
static char *
dd_get_driver_shader_log(struct dd_context *dctx)
{
#if defined(PIPE_OS_LINUX)
   FILE *f;
   char *buf;
   int written_bytes;

   if (!dctx->max_log_buffer_size)
      dctx->max_log_buffer_size = 16 * 1024;

   /* Keep increasing the buffer size until there is enough space.
    *
    * open_memstream can resize automatically, but it's VERY SLOW.
    * fmemopen is much faster.
    */
   while (1) {
      buf = malloc(dctx->max_log_buffer_size);
      if (!buf)
         return NULL; /* fix: the original dereferenced an unchecked malloc */
      buf[0] = 0;

      f = fmemopen(buf, dctx->max_log_buffer_size, "a");
      if (!f) {
         free(buf);
         return NULL;
      }

      dd_dump_driver_state(dctx, f, PIPE_DUMP_CURRENT_SHADERS);
      written_bytes = ftell(f);
      fclose(f);

      /* Return if the backing buffer is large enough. */
      if (written_bytes < dctx->max_log_buffer_size - 1)
         break;

      /* Try again with a bigger buffer. */
      free(buf);
      dctx->max_log_buffer_size *= 2;
   }

   return buf;
#else
   /* fmemopen is unavailable; return an empty, free()-able string. */
   return (char*)calloc(1, 4);
#endif
}
975
/* Record a draw for the pipelined hang-detection thread: snapshot the call,
 * the shader log and the full draw state, bump the GPU-side fence value and
 * push the record onto the (mutex-protected) list.
 */
976 static void
977 dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
978 {
979    struct pipe_context *pipe = dctx->pipe;
980    struct dd_draw_record *record;
981    char *log;
982
983    /* Make a record of the draw call. */
984    record = MALLOC_STRUCT(dd_draw_record);
985    if (!record)
986       return;
987
988    /* Create the log. */
989    log = dd_get_driver_shader_log(dctx);
990    if (!log) {
991       FREE(record);
992       return;
993    }
994
995    /* Update the fence with the GPU.
996     *
997     * radeonsi/clear_buffer waits in the command processor until shaders are
998     * idle before writing to memory. That's a necessary condition for isolating
999     * draw calls.
1000     */
1001    dctx->sequence_no++;
1002    pipe->clear_buffer(pipe, dctx->fence, 0, 4, &dctx->sequence_no, 4);
1003
1004    /* Initialize the record. */
1005    record->timestamp = os_time_get();
1006    record->sequence_no = dctx->sequence_no;
1007    record->driver_state_log = log;
1008
   /* Zero the call first so dd_copy_call's reference helpers see NULLs. */
1009    memset(&record->call, 0, sizeof(record->call));
1010    dd_copy_call(&record->call, call);
1011
1012    dd_init_copy_of_draw_state(&record->draw_state);
1013    dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
1014
1015    /* Add the record to the list. */
1016    pipe_mutex_lock(dctx->mutex);
1017    record->next = dctx->records;
1018    dctx->records = record;
1019    pipe_mutex_unlock(dctx->mutex);
1020 }
1021
/* pipe_context::flush wrapper. In DD_DETECT_HANGS mode the flush also
 * waits on the fence and aborts with a report on a hang; the other modes
 * forward straight to the wrapped driver.
 */
1022 static void
1023 dd_context_flush(struct pipe_context *_pipe,
1024                  struct pipe_fence_handle **fence, unsigned flags)
1025 {
1026    struct dd_context *dctx = dd_context(_pipe);
1027    struct pipe_context *pipe = dctx->pipe;
1028
1029    switch (dd_screen(dctx->base.screen)->mode) {
1030    case DD_DETECT_HANGS:
1031       dd_flush_and_handle_hang(dctx, fence, flags,
1032                                "GPU hang detected in pipe->flush()");
1033       break;
1034    case DD_DETECT_HANGS_PIPELINED: /* nothing to do here */
1035    case DD_DUMP_ALL_CALLS:
1036    case DD_DUMP_APITRACE_CALL:
1037       pipe->flush(pipe, fence, flags);
1038       break;
1039    default:
1040       assert(0);
1041    }
1042 }
1043
1044 static void
1045 dd_before_draw(struct dd_context *dctx)
1046 {
1047 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1048
1049 if (dscreen->mode == DD_DETECT_HANGS &&
1050 !dscreen->no_flush &&
1051 dctx->num_draw_calls >= dscreen->skip_count)
1052 dd_flush_and_handle_hang(dctx, NULL, 0,
1053 "GPU hang most likely caused by internal "
1054 "driver commands");
1055 }
1056
static void
dd_after_draw(struct dd_context *dctx, struct dd_call *call)
{
   /* Post-call hook: depending on the debug mode, check for a hang, record
    * the call for pipelined hang detection, or dump a report; then bump the
    * draw-call counter.  Calls before skip_count are not inspected. */
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_context *pipe = dctx->pipe;

   if (dctx->num_draw_calls >= dscreen->skip_count) {
      switch (dscreen->mode) {
      case DD_DETECT_HANGS:
         if (!dscreen->no_flush &&
             dd_flush_and_check_hang(dctx, NULL, 0)) {
            /* Hang confirmed: dump everything we can get our hands on. */
            dd_write_report(dctx, call,
                            PIPE_DUMP_DEVICE_STATUS_REGISTERS |
                            PIPE_DUMP_CURRENT_STATES |
                            PIPE_DUMP_CURRENT_SHADERS |
                            PIPE_DUMP_LAST_COMMAND_BUFFER,
                            true);

            /* Terminate the process to prevent future hangs. */
            dd_kill_process();
         }
         break;
      case DD_DETECT_HANGS_PIPELINED:
         /* Record the call without flushing; detection happens later. */
         dd_pipelined_process_draw(dctx, call);
         break;
      case DD_DUMP_ALL_CALLS:
         /* Flush first (unless disabled) so the dump reflects GPU state. */
         if (!dscreen->no_flush)
            pipe->flush(pipe, NULL, 0);
         dd_write_report(dctx, call, 0, false);
         break;
      case DD_DUMP_APITRACE_CALL:
         if (dscreen->apitrace_dump_call ==
             dctx->draw_state.apitrace_call_number) {
            dd_write_report(dctx, call, 0, false);
            /* No need to continue. */
            exit(0);
         }
         break;
      default:
         assert(0);
      }
   }

   /* Progress indicator: only useful when the user set a skip count and is
    * waiting for the debugger to reach a specific region of the workload. */
   ++dctx->num_draw_calls;
   if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
      fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
              dctx->num_draw_calls);
}
1105
1106 static void
1107 dd_context_draw_vbo(struct pipe_context *_pipe,
1108 const struct pipe_draw_info *info)
1109 {
1110 struct dd_context *dctx = dd_context(_pipe);
1111 struct pipe_context *pipe = dctx->pipe;
1112 struct dd_call call;
1113
1114 call.type = CALL_DRAW_VBO;
1115 call.info.draw_vbo = *info;
1116
1117 dd_before_draw(dctx);
1118 pipe->draw_vbo(pipe, info);
1119 dd_after_draw(dctx, &call);
1120 }
1121
1122 static void
1123 dd_context_launch_grid(struct pipe_context *_pipe,
1124 const struct pipe_grid_info *info)
1125 {
1126 struct dd_context *dctx = dd_context(_pipe);
1127 struct pipe_context *pipe = dctx->pipe;
1128 struct dd_call call;
1129
1130 call.type = CALL_LAUNCH_GRID;
1131 call.info.launch_grid = *info;
1132
1133 dd_before_draw(dctx);
1134 pipe->launch_grid(pipe, info);
1135 dd_after_draw(dctx, &call);
1136 }
1137
1138 static void
1139 dd_context_resource_copy_region(struct pipe_context *_pipe,
1140 struct pipe_resource *dst, unsigned dst_level,
1141 unsigned dstx, unsigned dsty, unsigned dstz,
1142 struct pipe_resource *src, unsigned src_level,
1143 const struct pipe_box *src_box)
1144 {
1145 struct dd_context *dctx = dd_context(_pipe);
1146 struct pipe_context *pipe = dctx->pipe;
1147 struct dd_call call;
1148
1149 call.type = CALL_RESOURCE_COPY_REGION;
1150 call.info.resource_copy_region.dst = dst;
1151 call.info.resource_copy_region.dst_level = dst_level;
1152 call.info.resource_copy_region.dstx = dstx;
1153 call.info.resource_copy_region.dsty = dsty;
1154 call.info.resource_copy_region.dstz = dstz;
1155 call.info.resource_copy_region.src = src;
1156 call.info.resource_copy_region.src_level = src_level;
1157 call.info.resource_copy_region.src_box = *src_box;
1158
1159 dd_before_draw(dctx);
1160 pipe->resource_copy_region(pipe,
1161 dst, dst_level, dstx, dsty, dstz,
1162 src, src_level, src_box);
1163 dd_after_draw(dctx, &call);
1164 }
1165
1166 static void
1167 dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1168 {
1169 struct dd_context *dctx = dd_context(_pipe);
1170 struct pipe_context *pipe = dctx->pipe;
1171 struct dd_call call;
1172
1173 call.type = CALL_BLIT;
1174 call.info.blit = *info;
1175
1176 dd_before_draw(dctx);
1177 pipe->blit(pipe, info);
1178 dd_after_draw(dctx, &call);
1179 }
1180
1181 static boolean
1182 dd_context_generate_mipmap(struct pipe_context *_pipe,
1183 struct pipe_resource *res,
1184 enum pipe_format format,
1185 unsigned base_level,
1186 unsigned last_level,
1187 unsigned first_layer,
1188 unsigned last_layer)
1189 {
1190 struct dd_context *dctx = dd_context(_pipe);
1191 struct pipe_context *pipe = dctx->pipe;
1192 struct dd_call call;
1193 boolean result;
1194
1195 call.type = CALL_GENERATE_MIPMAP;
1196 call.info.generate_mipmap.res = res;
1197 call.info.generate_mipmap.format = format;
1198 call.info.generate_mipmap.base_level = base_level;
1199 call.info.generate_mipmap.last_level = last_level;
1200 call.info.generate_mipmap.first_layer = first_layer;
1201 call.info.generate_mipmap.last_layer = last_layer;
1202
1203 dd_before_draw(dctx);
1204 result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1205 first_layer, last_layer);
1206 dd_after_draw(dctx, &call);
1207 return result;
1208 }
1209
1210 static void
1211 dd_context_flush_resource(struct pipe_context *_pipe,
1212 struct pipe_resource *resource)
1213 {
1214 struct dd_context *dctx = dd_context(_pipe);
1215 struct pipe_context *pipe = dctx->pipe;
1216 struct dd_call call;
1217
1218 call.type = CALL_FLUSH_RESOURCE;
1219 call.info.flush_resource = resource;
1220
1221 dd_before_draw(dctx);
1222 pipe->flush_resource(pipe, resource);
1223 dd_after_draw(dctx, &call);
1224 }
1225
1226 static void
1227 dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
1228 const union pipe_color_union *color, double depth,
1229 unsigned stencil)
1230 {
1231 struct dd_context *dctx = dd_context(_pipe);
1232 struct pipe_context *pipe = dctx->pipe;
1233 struct dd_call call;
1234
1235 call.type = CALL_CLEAR;
1236 call.info.clear.buffers = buffers;
1237 call.info.clear.color = *color;
1238 call.info.clear.depth = depth;
1239 call.info.clear.stencil = stencil;
1240
1241 dd_before_draw(dctx);
1242 pipe->clear(pipe, buffers, color, depth, stencil);
1243 dd_after_draw(dctx, &call);
1244 }
1245
1246 static void
1247 dd_context_clear_render_target(struct pipe_context *_pipe,
1248 struct pipe_surface *dst,
1249 const union pipe_color_union *color,
1250 unsigned dstx, unsigned dsty,
1251 unsigned width, unsigned height)
1252 {
1253 struct dd_context *dctx = dd_context(_pipe);
1254 struct pipe_context *pipe = dctx->pipe;
1255 struct dd_call call;
1256
1257 call.type = CALL_CLEAR_RENDER_TARGET;
1258
1259 dd_before_draw(dctx);
1260 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height);
1261 dd_after_draw(dctx, &call);
1262 }
1263
1264 static void
1265 dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1266 struct pipe_surface *dst, unsigned clear_flags,
1267 double depth, unsigned stencil, unsigned dstx,
1268 unsigned dsty, unsigned width, unsigned height)
1269 {
1270 struct dd_context *dctx = dd_context(_pipe);
1271 struct pipe_context *pipe = dctx->pipe;
1272 struct dd_call call;
1273
1274 call.type = CALL_CLEAR_DEPTH_STENCIL;
1275
1276 dd_before_draw(dctx);
1277 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1278 dstx, dsty, width, height);
1279 dd_after_draw(dctx, &call);
1280 }
1281
1282 static void
1283 dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1284 unsigned offset, unsigned size,
1285 const void *clear_value, int clear_value_size)
1286 {
1287 struct dd_context *dctx = dd_context(_pipe);
1288 struct pipe_context *pipe = dctx->pipe;
1289 struct dd_call call;
1290
1291 call.type = CALL_CLEAR_BUFFER;
1292 call.info.clear_buffer.res = res;
1293 call.info.clear_buffer.offset = offset;
1294 call.info.clear_buffer.size = size;
1295 call.info.clear_buffer.clear_value = clear_value;
1296 call.info.clear_buffer.clear_value_size = clear_value_size;
1297
1298 dd_before_draw(dctx);
1299 pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1300 dd_after_draw(dctx, &call);
1301 }
1302
void
dd_init_draw_functions(struct dd_context *dctx)
{
   /* Install the dd_context_* wrappers for all draw-related entry points on
    * the debug context.  CTX_INIT(name) presumably expands to
    * "dctx->base.name = dd_context_name" — verify against the macro
    * definition (likely in dd_pipe.h). */
   CTX_INIT(flush);
   CTX_INIT(draw_vbo);
   CTX_INIT(launch_grid);
   CTX_INIT(resource_copy_region);
   CTX_INIT(blit);
   CTX_INIT(clear);
   CTX_INIT(clear_render_target);
   CTX_INIT(clear_depth_stencil);
   CTX_INIT(clear_buffer);
   CTX_INIT(flush_resource);
   CTX_INIT(generate_mipmap);
}