ddebug: handle some cases of non-TGSI shaders
[mesa.git] / src / gallium / drivers / ddebug / dd_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "dd_pipe.h"
29
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "os/os_time.h"
39 #include <inttypes.h>
40
41
42 static FILE *
43 dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
44 {
45 struct pipe_screen *screen = dscreen->screen;
46 char cmd_line[4096];
47
48 FILE *f = dd_get_debug_file(dscreen->verbose);
49 if (!f)
50 return NULL;
51
52 if (os_get_command_line(cmd_line, sizeof(cmd_line)))
53 fprintf(f, "Command: %s\n", cmd_line);
54 fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
55 fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
56 fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
57
58 if (apitrace_call_number)
59 fprintf(f, "Last apitrace call: %u\n\n",
60 apitrace_call_number);
61 return f;
62 }
63
/* Append the last 60 lines of the kernel log to the report file "f".
 * Silently does nothing if the dmesg pipe cannot be opened.
 */
static void
dd_dump_dmesg(FILE *f)
{
   char buf[2000];
   FILE *pipe = popen("dmesg | tail -n60", "r");

   if (!pipe)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(buf, sizeof(buf), pipe))
      fputs(buf, f);

   pclose(pipe);
}
79
/* Counterpart of dd_get_file_stream(): close the debug log. */
static void
dd_close_file_stream(FILE *f)
{
   fclose(f);
}
85
/* Return the number of viewports the bound shaders can address:
 * PIPE_MAX_VIEWPORTS if the last pre-rasterization stage (GS, then TES,
 * then VS, in that priority order) writes VIEWPORT_INDEX, otherwise 1.
 */
static unsigned
dd_num_active_viewports(struct dd_draw_state *dstate)
{
   struct tgsi_shader_info info;
   const struct tgsi_token *tokens;

   if (dstate->shaders[PIPE_SHADER_GEOMETRY])
      tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
      tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_VERTEX])
      tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
   else
      return 1;

   /* tokens may be NULL for non-TGSI shaders (see dd_copy_draw_state, which
    * clears ir.nir in that case); without tokens we can't scan the shader,
    * so conservatively report a single viewport. */
   if (tokens) {
      tgsi_scan_shader(tokens, &info);
      if (info.writes_viewport_index)
         return PIPE_MAX_VIEWPORTS;
   }

   return 1;
}
109
/* ANSI terminal color escapes that make the state dumps easier to scan. */
#define COLOR_RESET "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE "\033[1;33m"

/* Dump a named state object through util_dump_<name>(f, var).
 * All of these expect a FILE* named "f" in the calling scope. */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Like DUMP, but also prints an index "i" (for array slots). */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a struct member by value. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a struct member by address (for dumpers that take a pointer). */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)
/* Print "name = value" (decimal) with the state highlight color. */
static void
print_named_value(FILE *f, const char *name, int value)
{
   fprintf(f, COLOR_STATE "%s" COLOR_RESET " = %i\n", name, value);
}
143
/* Print "name = 0x........" (zero-padded hex) with the state highlight color. */
static void
print_named_xvalue(FILE *f, const char *name, int value)
{
   fprintf(f, COLOR_STATE "%s" COLOR_RESET " = 0x%08x\n", name, value);
}
149
/* Print an unsigned integer in decimal; matches the util_dump_* signature
 * used by the DUMP_M macro. */
static void
util_dump_uint(FILE *f, unsigned i)
{
   char buf[16];

   snprintf(buf, sizeof(buf), "%u", i);
   fputs(buf, f);
}
155
/* Print an unsigned integer as 0x-prefixed hex; matches the util_dump_*
 * signature used by the DUMP_M macro. */
static void
util_dump_hex(FILE *f, unsigned i)
{
   char buf[16];

   snprintf(buf, sizeof(buf), "0x%x", i);
   fputs(buf, f);
}
161
/* Print a double with the default "%f" formatting (6 fractional digits);
 * matches the util_dump_* signature used by the DUMP_M macro. */
static void
util_dump_double(FILE *f, double d)
{
   /* Large enough for any finite double in "%f" form (~317 chars max). */
   char buf[512];

   snprintf(buf, sizeof(buf), "%f", d);
   fputs(buf, f);
}
167
/* Print the symbolic name of a pipe_format via util_format_name(). */
static void
util_dump_format(FILE *f, enum pipe_format format)
{
   fprintf(f, "%s", util_format_name(format));
}
173
174 static void
175 util_dump_color_union(FILE *f, const union pipe_color_union *color)
176 {
177 fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
178 color->f[0], color->f[1], color->f[2], color->f[3],
179 color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
180 }
181
182 static void
183 util_dump_query(FILE *f, struct dd_query *query)
184 {
185 if (query->type >= PIPE_QUERY_DRIVER_SPECIFIC)
186 fprintf(f, "PIPE_QUERY_DRIVER_SPECIFIC + %i",
187 query->type - PIPE_QUERY_DRIVER_SPECIFIC);
188 else
189 fprintf(f, "%s", util_dump_query_type(query->type, false));
190 }
191
192 static void
193 dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
194 {
195 if (dstate->render_cond.query) {
196 fprintf(f, "render condition:\n");
197 DUMP_M(query, &dstate->render_cond, query);
198 DUMP_M(uint, &dstate->render_cond, condition);
199 DUMP_M(uint, &dstate->render_cond, mode);
200 fprintf(f, "\n");
201 }
202 }
203
/* Dump everything bound to shader stage "sh": the shader itself, constant
 * buffers, sampler states/views, images and shader buffers.
 * For the fragment stage, rasterizer-coupled state (clip state, viewports,
 * scissors, polygon stipple) is dumped first; for the tess-ctrl stage, the
 * default tessellation levels are printed when a TES is bound without a TCS.
 */
static void
dd_dump_shader(struct dd_draw_state *dstate, enum pipe_shader_type sh, FILE *f)
{
   int i;
   const char *shader_str[PIPE_SHADER_TYPES];

   shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
   shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
   shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
   shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
   shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
   shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";

   /* No TCS but a TES means fixed default tess levels are in use. */
   if (sh == PIPE_SHADER_TESS_CTRL &&
       !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
       dstate->shaders[PIPE_SHADER_TESS_EVAL])
      fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
              "default_inner_level = {%f, %f}}\n",
              dstate->tess_default_levels[0],
              dstate->tess_default_levels[1],
              dstate->tess_default_levels[2],
              dstate->tess_default_levels[3],
              dstate->tess_default_levels[4],
              dstate->tess_default_levels[5]);

   /* Rasterizer state is dumped together with the fragment stage. */
   if (sh == PIPE_SHADER_FRAGMENT)
      if (dstate->rs) {
         unsigned num_viewports = dd_num_active_viewports(dstate);

         if (dstate->rs->state.rs.clip_plane_enable)
            DUMP(clip_state, &dstate->clip_state);

         for (i = 0; i < num_viewports; i++)
            DUMP_I(viewport_state, &dstate->viewports[i], i);

         if (dstate->rs->state.rs.scissor)
            for (i = 0; i < num_viewports; i++)
               DUMP_I(scissor_state, &dstate->scissors[i], i);

         DUMP(rasterizer_state, &dstate->rs->state.rs);

         if (dstate->rs->state.rs.poly_stipple_enable)
            DUMP(poly_stipple, &dstate->polygon_stipple);
         fprintf(f, "\n");
      }

   if (!dstate->shaders[sh])
      return;

   fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
   DUMP(shader_state, &dstate->shaders[sh]->state.shader);

   /* A constant buffer slot is active if it has either a resource or a
    * user-memory pointer. */
   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
      if (dstate->constant_buffers[sh][i].buffer ||
          dstate->constant_buffers[sh][i].user_buffer) {
         DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
         if (dstate->constant_buffers[sh][i].buffer)
            DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
      }

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
      if (dstate->sampler_states[sh][i])
         DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
      if (dstate->sampler_views[sh][i]) {
         DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
         DUMP_M(resource, dstate->sampler_views[sh][i], texture);
      }

   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
      if (dstate->shader_images[sh][i].resource) {
         DUMP_I(image_view, &dstate->shader_images[sh][i], i);
         if (dstate->shader_images[sh][i].resource)
            DUMP_M(resource, &dstate->shader_images[sh][i], resource);
      }

   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
      if (dstate->shader_buffers[sh][i].buffer) {
         DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
         if (dstate->shader_buffers[sh][i].buffer)
            DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
      }

   fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
}
290
/* Dump a draw_vbo call and all state relevant to it: draw info, render
 * condition, vertex buffers and elements, stream output targets, every
 * non-compute shader stage, DSA/blend state and the framebuffer.
 */
static void
dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
{
   int sh, i;

   DUMP(draw_info, info);
   if (info->count_from_stream_output)
      DUMP_M(stream_output_target, info,
             count_from_stream_output);
   if (info->indirect) {
      DUMP_M(resource, info, indirect->buffer);
      if (info->indirect->indirect_draw_count)
         DUMP_M(resource, info, indirect->indirect_draw_count);
   }

   fprintf(f, "\n");

   /* TODO: dump active queries */

   dd_dump_render_condition(dstate, f);

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      if (dstate->vertex_buffers[i].buffer.resource) {
         DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
         /* User buffers have no pipe_resource to dump. */
         if (!dstate->vertex_buffers[i].is_user_buffer)
            DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
      }

   if (dstate->velems) {
      print_named_value(f, "num vertex elements",
                        dstate->velems->state.velems.count);
      for (i = 0; i < dstate->velems->state.velems.count; i++) {
         fprintf(f, " ");
         DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
      }
   }

   print_named_value(f, "num stream output targets", dstate->num_so_targets);
   for (i = 0; i < dstate->num_so_targets; i++)
      if (dstate->so_targets[i]) {
         DUMP_I(stream_output_target, dstate->so_targets[i], i);
         DUMP_M(resource, dstate->so_targets[i], buffer);
         fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
      }

   fprintf(f, "\n");
   /* Compute state is dumped by dd_dump_launch_grid() instead. */
   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (sh == PIPE_SHADER_COMPUTE)
         continue;

      dd_dump_shader(dstate, sh, f);
   }

   if (dstate->dsa)
      DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
   DUMP(stencil_ref, &dstate->stencil_ref);

   if (dstate->blend)
      DUMP(blend_state, &dstate->blend->state.blend);
   DUMP(blend_color, &dstate->blend_color);

   print_named_value(f, "min_samples", dstate->min_samples);
   print_named_xvalue(f, "sample_mask", dstate->sample_mask);
   fprintf(f, "\n");

   DUMP(framebuffer_state, &dstate->framebuffer_state);
   for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
      if (dstate->framebuffer_state.cbufs[i]) {
         fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
         DUMP(surface, dstate->framebuffer_state.cbufs[i]);
         fprintf(f, " ");
         DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
      }
   if (dstate->framebuffer_state.zsbuf) {
      fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
      DUMP(surface, dstate->framebuffer_state.zsbuf);
      fprintf(f, " ");
      DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
   }
   fprintf(f, "\n");
}
372
/* Dump a launch_grid (compute dispatch) call: the grid info plus the
 * compute shader stage. __func__+8 skips the "dd_dump_" prefix so the
 * header line reads "launch_grid:".
 */
static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(grid_info, info);
   fprintf(f, "\n");

   dd_dump_shader(dstate, PIPE_SHADER_COMPUTE, f);
   fprintf(f, "\n");
}
383
/* Dump a resource_copy_region call (src/dst resources, mip levels,
 * destination coordinates and source box). __func__+8 strips "dd_dump_". */
static void
dd_dump_resource_copy_region(struct dd_draw_state *dstate,
                             struct call_resource_copy_region *info,
                             FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst);
   DUMP_M(uint, info, dst_level);
   DUMP_M(uint, info, dstx);
   DUMP_M(uint, info, dsty);
   DUMP_M(uint, info, dstz);
   DUMP_M(resource, info, src);
   DUMP_M(uint, info, src_level);
   DUMP_M_ADDR(box, info, src_box);
}
399
/* Dump a blit call: both surfaces (resource, level, box, format), the
 * channel mask, filtering, scissor and render-condition settings.
 * __func__+8 strips the "dd_dump_" prefix. */
static void
dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst.resource);
   DUMP_M(uint, info, dst.level);
   DUMP_M_ADDR(box, info, dst.box);
   DUMP_M(format, info, dst.format);

   DUMP_M(resource, info, src.resource);
   DUMP_M(uint, info, src.level);
   DUMP_M_ADDR(box, info, src.box);
   DUMP_M(format, info, src.format);

   DUMP_M(hex, info, mask);
   DUMP_M(uint, info, filter);
   DUMP_M(uint, info, scissor_enable);
   DUMP_M_ADDR(scissor_state, info, scissor);
   DUMP_M(uint, info, render_condition_enable);

   /* The render condition only affects the blit if explicitly enabled. */
   if (info->render_condition_enable)
      dd_dump_render_condition(dstate, f);
}
423
/* Dump a generate_mipmap call. Only the call name is printed so far;
 * the parameters are not dumped yet. */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
430
/* Dump a flush_resource call: just the resource being flushed. */
static void
dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
                       FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(resource, res);
}
438
/* Dump a clear call: which buffers are cleared and the clear values
 * (color union, depth, stencil). */
static void
dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(uint, info, buffers);
   DUMP_M_ADDR(color_union, info, color);
   DUMP_M(double, info, depth);
   DUMP_M(hex, info, stencil);
}
448
449 static void
450 dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
451 FILE *f)
452 {
453 int i;
454 const char *value = (const char*)info->clear_value;
455
456 fprintf(f, "%s:\n", __func__+8);
457 DUMP_M(resource, info, res);
458 DUMP_M(uint, info, offset);
459 DUMP_M(uint, info, size);
460 DUMP_M(uint, info, clear_value_size);
461
462 fprintf(f, " clear_value:");
463 for (i = 0; i < info->clear_value_size; i++)
464 fprintf(f, " %02x", value[i]);
465 fprintf(f, "\n");
466 }
467
/* Dump a clear_texture call. Only the call name is printed so far. */
static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
474
/* Dump a clear_render_target call. Only the call name is printed so far. */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
481
/* Dump a clear_depth_stencil call. Only the call name is printed so far. */
static void
dd_dump_clear_depth_stencil(struct dd_context *dctx, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
488
489 static void
490 dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
491 {
492 if (dctx->pipe->dump_debug_state) {
493 fprintf(f,"\n\n**************************************************"
494 "***************************\n");
495 fprintf(f, "Driver-specific state:\n\n");
496 dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
497 }
498 }
499
/* Dispatch a recorded call to the matching dd_dump_* function.
 * Every CALL_* enumerator is handled explicitly; there is deliberately no
 * default case so the compiler can warn when a new call type is added. */
static void
dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
{
   switch (call->type) {
   case CALL_DRAW_VBO:
      dd_dump_draw_vbo(state, &call->info.draw_vbo.draw, f);
      break;
   case CALL_LAUNCH_GRID:
      dd_dump_launch_grid(state, &call->info.launch_grid, f);
      break;
   case CALL_RESOURCE_COPY_REGION:
      dd_dump_resource_copy_region(state,
                                   &call->info.resource_copy_region, f);
      break;
   case CALL_BLIT:
      dd_dump_blit(state, &call->info.blit, f);
      break;
   case CALL_FLUSH_RESOURCE:
      dd_dump_flush_resource(state, call->info.flush_resource, f);
      break;
   case CALL_CLEAR:
      dd_dump_clear(state, &call->info.clear, f);
      break;
   case CALL_CLEAR_BUFFER:
      dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
      break;
   case CALL_CLEAR_TEXTURE:
      dd_dump_clear_texture(state, f);
      break;
   case CALL_CLEAR_RENDER_TARGET:
      dd_dump_clear_render_target(state, f);
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      dd_dump_clear_depth_stencil(state, f);
      break;
   case CALL_GENERATE_MIPMAP:
      dd_dump_generate_mipmap(state, f);
      break;
   }
}
540
541 static void
542 dd_write_report(struct dd_context *dctx, struct dd_call *call, unsigned flags,
543 bool dump_dmesg)
544 {
545 FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
546 dctx->draw_state.apitrace_call_number);
547
548 if (!f)
549 return;
550
551 dd_dump_call(f, &dctx->draw_state, call);
552 dd_dump_driver_state(dctx, f, flags);
553 if (dump_dmesg)
554 dd_dump_dmesg(f);
555 dd_close_file_stream(f);
556 }
557
/* Terminate the process after a hang report has been written: sync the
 * filesystems so the report reaches disk, flush stdio, then exit.
 * Killing the process prevents an endless stream of follow-up hangs. */
static void
dd_kill_process(void)
{
   sync();
   fprintf(stderr, "dd: Aborting the process...\n");
   fflush(stdout);
   fflush(stderr);
   exit(1);
}
567
/* Flush the wrapped context and wait for the resulting fence with the
 * screen's configured timeout.
 *
 * If "flush_fence" is non-NULL, it receives a reference to the flush fence.
 * Returns true if the fence did not signal within timeout_ms (treated as a
 * GPU hang); false if the GPU went idle or no fence was returned. */
static bool
dd_flush_and_check_hang(struct dd_context *dctx,
                        struct pipe_fence_handle **flush_fence,
                        unsigned flush_flags)
{
   struct pipe_fence_handle *fence = NULL;
   struct pipe_context *pipe = dctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   uint64_t timeout_ms = dd_screen(dctx->base.screen)->timeout_ms;
   bool idle;

   assert(timeout_ms > 0);

   pipe->flush(pipe, &fence, flush_flags);
   if (flush_fence)
      screen->fence_reference(screen, flush_fence, fence);
   if (!fence)
      return false;

   /* fence_finish takes nanoseconds; timeout_ms is milliseconds. */
   idle = screen->fence_finish(screen, pipe, fence, timeout_ms * 1000000);
   screen->fence_reference(screen, &fence, NULL);
   if (!idle)
      fprintf(stderr, "dd: GPU hang detected!\n");
   return !idle;
}
593
/* Flush and, if a hang is detected, write a hang report (driver state plus
 * dmesg) annotated with "cause" and kill the process. Returns normally only
 * when no hang occurred. */
static void
dd_flush_and_handle_hang(struct dd_context *dctx,
                         struct pipe_fence_handle **fence, unsigned flags,
                         const char *cause)
{
   if (dd_flush_and_check_hang(dctx, fence, flags)) {
      FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                   dctx->draw_state.apitrace_call_number);

      if (f) {
         fprintf(f, "dd: %s.\n", cause);
         dd_dump_driver_state(dctx, f,
                              PIPE_DUMP_DEVICE_STATUS_REGISTERS |
                              PIPE_DUMP_CURRENT_STATES |
                              PIPE_DUMP_CURRENT_SHADERS |
                              PIPE_DUMP_LAST_COMMAND_BUFFER);
         dd_dump_dmesg(f);
         dd_close_file_stream(f);
      }

      /* Terminate the process to prevent future hangs. */
      dd_kill_process();
   }
}
618
/* Release all gallium-object references held by a copied call; the
 * counterpart of dd_copy_call(). Call types that hold no references are
 * listed explicitly to keep the switch exhaustive. */
static void
dd_unreference_copy_of_call(struct dd_call *dst)
{
   switch (dst->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
      /* Only a real index buffer holds a reference; a user index array is
       * just a pointer that gets cleared. */
      if (dst->info.draw_vbo.draw.index_size &&
          !dst->info.draw_vbo.draw.has_user_indices)
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
      else
         dst->info.draw_vbo.draw.index.user = NULL;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
      pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
      pipe_resource_reference(&dst->info.blit.src.resource, NULL);
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource, NULL);
      break;
   case CALL_CLEAR:
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
      break;
   }
}
663
/* Deep-copy a call from "src" into "dst" so the record stays valid after
 * the original call returns: references are taken on all gallium objects
 * BEFORE the plain struct assignment overwrites dst's pointers, so old
 * references are dropped and new ones are held exactly once.
 * dd_unreference_copy_of_call() is the counterpart. */
static void
dd_copy_call(struct dd_call *dst, struct dd_call *src)
{
   dst->type = src->type;

   switch (src->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output,
                               src->info.draw_vbo.draw.count_from_stream_output);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer,
                              src->info.draw_vbo.indirect.buffer);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count,
                              src->info.draw_vbo.indirect.indirect_draw_count);

      /* Drop dst's old index-buffer reference (or user pointer)... */
      if (dst->info.draw_vbo.draw.index_size &&
          !dst->info.draw_vbo.draw.has_user_indices)
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
      else
         dst->info.draw_vbo.draw.index.user = NULL;

      /* ...and take a reference on src's, if it is a real resource. */
      if (src->info.draw_vbo.draw.index_size &&
          !src->info.draw_vbo.draw.has_user_indices) {
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource,
                                 src->info.draw_vbo.draw.index.resource);
      }

      dst->info.draw_vbo = src->info.draw_vbo;
      /* Repoint "indirect" at our own copy of the indirect-draw info. */
      if (!src->info.draw_vbo.draw.indirect)
         dst->info.draw_vbo.draw.indirect = NULL;
      else
         dst->info.draw_vbo.draw.indirect = &dst->info.draw_vbo.indirect;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect,
                              src->info.launch_grid.indirect);
      dst->info.launch_grid = src->info.launch_grid;
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst,
                              src->info.resource_copy_region.dst);
      pipe_resource_reference(&dst->info.resource_copy_region.src,
                              src->info.resource_copy_region.src);
      dst->info.resource_copy_region = src->info.resource_copy_region;
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource,
                              src->info.blit.dst.resource);
      pipe_resource_reference(&dst->info.blit.src.resource,
                              src->info.blit.src.resource);
      dst->info.blit = src->info.blit;
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource,
                              src->info.flush_resource);
      break;
   case CALL_CLEAR:
      dst->info.clear = src->info.clear;
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res,
                              src->info.clear_buffer.res);
      dst->info.clear_buffer = src->info.clear_buffer;
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res,
                              src->info.generate_mipmap.res);
      dst->info.generate_mipmap = src->info.generate_mipmap;
      break;
   }
}
740
/* Prepare a draw-state copy for use: zero only the pointer-holding arrays
 * (so later reference updates start from NULL) and point the CSO slots in
 * "base" at the by-value storage embedded in the copy struct. */
static void
dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   unsigned i,j;

   /* Just clear pointers to gallium objects. Don't clear the whole structure,
    * because it would kill performance with its size of 130 KB.
    */
   memset(state->base.vertex_buffers, 0,
          sizeof(state->base.vertex_buffers));
   memset(state->base.so_targets, 0,
          sizeof(state->base.so_targets));
   memset(state->base.constant_buffers, 0,
          sizeof(state->base.constant_buffers));
   memset(state->base.sampler_views, 0,
          sizeof(state->base.sampler_views));
   memset(state->base.shader_images, 0,
          sizeof(state->base.shader_images));
   memset(state->base.shader_buffers, 0,
          sizeof(state->base.shader_buffers));
   memset(&state->base.framebuffer_state, 0,
          sizeof(state->base.framebuffer_state));

   memset(state->shaders, 0, sizeof(state->shaders));

   /* The base pointers refer to the embedded by-value copies below;
    * dd_copy_draw_state() fills them in (or NULLs the base pointer). */
   state->base.render_cond.query = &state->render_cond;

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      state->base.shaders[i] = &state->shaders[i];
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         state->base.sampler_states[i][j] = &state->sampler_states[i][j];
   }

   state->base.velems = &state->velems;
   state->base.rs = &state->rs;
   state->base.dsa = &state->dsa;
   state->base.blend = &state->blend;
}
779
/* Release everything a draw-state copy holds: buffer/view references,
 * duplicated TGSI tokens, and the framebuffer state. Counterpart of
 * dd_copy_draw_state(). */
static void
dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   struct dd_draw_state *dst = &state->base;
   unsigned i,j;

   for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
      pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
   for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
      pipe_so_target_reference(&dst->so_targets[i], NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* Tokens were duplicated with tgsi_dup_tokens(); free our copy.
       * (NULL tokens — non-TGSI shaders — are a no-op for tgsi_free_tokens.) */
      if (dst->shaders[i])
         tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);

      for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
         pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
      for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
         pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
      for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
         pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
   }

   util_unreference_framebuffer_state(&dst->framebuffer_state);
}
807
808 static void
809 dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
810 {
811 unsigned i,j;
812
813 if (src->render_cond.query) {
814 *dst->render_cond.query = *src->render_cond.query;
815 dst->render_cond.condition = src->render_cond.condition;
816 dst->render_cond.mode = src->render_cond.mode;
817 } else {
818 dst->render_cond.query = NULL;
819 }
820
821 for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
822 pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
823 &src->vertex_buffers[i]);
824 }
825
826 dst->num_so_targets = src->num_so_targets;
827 for (i = 0; i < ARRAY_SIZE(src->so_targets); i++)
828 pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
829 memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
830
831 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
832 if (!src->shaders[i]) {
833 dst->shaders[i] = NULL;
834 continue;
835 }
836
837 if (src->shaders[i]) {
838 dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
839 if (src->shaders[i]->state.shader.tokens) {
840 dst->shaders[i]->state.shader.tokens =
841 tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
842 } else {
843 dst->shaders[i]->state.shader.ir.nir = NULL;
844 }
845 } else {
846 dst->shaders[i] = NULL;
847 }
848
849 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
850 pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
851 src->constant_buffers[i][j].buffer);
852 memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
853 sizeof(src->constant_buffers[i][j]));
854 }
855
856 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
857 pipe_sampler_view_reference(&dst->sampler_views[i][j],
858 src->sampler_views[i][j]);
859 if (src->sampler_states[i][j])
860 dst->sampler_states[i][j]->state.sampler =
861 src->sampler_states[i][j]->state.sampler;
862 else
863 dst->sampler_states[i][j] = NULL;
864 }
865
866 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
867 pipe_resource_reference(&dst->shader_images[i][j].resource,
868 src->shader_images[i][j].resource);
869 memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
870 sizeof(src->shader_images[i][j]));
871 }
872
873 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
874 pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
875 src->shader_buffers[i][j].buffer);
876 memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
877 sizeof(src->shader_buffers[i][j]));
878 }
879 }
880
881 if (src->velems)
882 dst->velems->state.velems = src->velems->state.velems;
883 else
884 dst->velems = NULL;
885
886 if (src->rs)
887 dst->rs->state.rs = src->rs->state.rs;
888 else
889 dst->rs = NULL;
890
891 if (src->dsa)
892 dst->dsa->state.dsa = src->dsa->state.dsa;
893 else
894 dst->dsa = NULL;
895
896 if (src->blend)
897 dst->blend->state.blend = src->blend->state.blend;
898 else
899 dst->blend = NULL;
900
901 dst->blend_color = src->blend_color;
902 dst->stencil_ref = src->stencil_ref;
903 dst->sample_mask = src->sample_mask;
904 dst->min_samples = src->min_samples;
905 dst->clip_state = src->clip_state;
906 util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
907 memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
908 memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
909 memcpy(dst->tess_default_levels, src->tess_default_levels,
910 sizeof(src->tess_default_levels));
911 dst->apitrace_call_number = src->apitrace_call_number;
912 }
913
914 static void
915 dd_free_record(struct dd_draw_record **record)
916 {
917 struct dd_draw_record *next = (*record)->next;
918
919 dd_unreference_copy_of_call(&(*record)->call);
920 dd_unreference_copy_of_draw_state(&(*record)->draw_state);
921 FREE((*record)->driver_state_log);
922 FREE(*record);
923 *record = next;
924 }
925
/* Write a hang report for a single pipelined draw record: sequence numbers
 * (submitted vs. reached by the GPU), elapsed time, the recorded call and
 * draw state, the pre-captured driver shader log, current device status
 * registers, and the dmesg tail. */
static void
dd_dump_record(struct dd_context *dctx, struct dd_draw_record *record,
               uint32_t hw_sequence_no, int64_t now)
{
   FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                record->draw_state.base.apitrace_call_number);
   if (!f)
      return;

   fprintf(f, "Draw call sequence # = %u\n", record->sequence_no);
   fprintf(f, "HW reached sequence # = %u\n", hw_sequence_no);
   /* record->timestamp comes from os_time_get(), which is microseconds. */
   fprintf(f, "Elapsed time = %"PRIi64" ms\n\n",
           (now - record->timestamp) / 1000);

   dd_dump_call(f, &record->draw_state.base, &record->call);
   fprintf(f, "%s\n", record->driver_state_log);

   dctx->pipe->dump_debug_state(dctx->pipe, f,
                                PIPE_DUMP_DEVICE_STATUS_REGISTERS);
   dd_dump_dmesg(f);
   fclose(f);
}
948
/* Entry point of the pipelined hang-detection thread.
 *
 * Every 10 ms, under dctx->mutex, walk the record list: records whose
 * sequence number the GPU-written fence (*dctx->mapped_fence) has reached
 * are freed; a record that stays unsignalled past dscreen->timeout_ms
 * triggers a report for the oldest unsignalled draw and kills the process.
 * Returns 0 after dctx->kill_thread is set and all records are freed. */
int
dd_thread_pipelined_hang_detect(void *input)
{
   struct dd_context *dctx = (struct dd_context *)input;
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);

   mtx_lock(&dctx->mutex);

   while (!dctx->kill_thread) {
      struct dd_draw_record **record = &dctx->records;

      /* Loop over all records. */
      while (*record) {
         int64_t now;

         /* If the fence has been signalled, release the record and all older
          * records.
          */
         if (*dctx->mapped_fence >= (*record)->sequence_no) {
            while (*record)
               dd_free_record(record);
            break;
         }

         /* The fence hasn't been signalled. Check the timeout. */
         now = os_time_get();
         /* timeout_ms * 1000: os_time works in microseconds. */
         if (os_time_timeout((*record)->timestamp,
                             (*record)->timestamp + dscreen->timeout_ms * 1000,
                             now)) {
            fprintf(stderr, "GPU hang detected.\n");

            /* Get the oldest unsignalled draw call. */
            while ((*record)->next &&
                   *dctx->mapped_fence < (*record)->next->sequence_no)
               record = &(*record)->next;

            dd_dump_record(dctx, *record, *dctx->mapped_fence, now);
            dd_kill_process();
         }

         record = &(*record)->next;
      }

      /* Unlock and sleep before starting all over again. */
      mtx_unlock(&dctx->mutex);
      os_time_sleep(10000); /* 10 ms */
      mtx_lock(&dctx->mutex);
   }

   /* Thread termination. */
   while (dctx->records)
      dd_free_record(&dctx->records);

   mtx_unlock(&dctx->mutex);
   return 0;
}
1005
/* Capture the driver's current-shader dump into a malloc'ed string the
 * caller must free. Returns NULL on allocation failure. On non-Linux
 * platforms (no fmemopen) an empty string is returned instead. */
static char *
dd_get_driver_shader_log(struct dd_context *dctx)
{
#if defined(PIPE_OS_LINUX)
   FILE *f;
   char *buf;
   int written_bytes;

   if (!dctx->max_log_buffer_size)
      dctx->max_log_buffer_size = 16 * 1024;

   /* Keep increasing the buffer size until there is enough space.
    *
    * open_memstream can resize automatically, but it's VERY SLOW.
    * fmemopen is much faster.
    */
   while (1) {
      buf = malloc(dctx->max_log_buffer_size);
      /* The previous version dereferenced the malloc result unchecked. */
      if (!buf)
         return NULL;
      buf[0] = 0;

      f = fmemopen(buf, dctx->max_log_buffer_size, "a");
      if (!f) {
         free(buf);
         return NULL;
      }

      dd_dump_driver_state(dctx, f, PIPE_DUMP_CURRENT_SHADERS);
      written_bytes = ftell(f);
      fclose(f);

      /* Return if the backing buffer is large enough. */
      if (written_bytes < dctx->max_log_buffer_size - 1)
         break;

      /* Try again. */
      free(buf);
      dctx->max_log_buffer_size *= 2;
   }

   return buf;
#else
   /* Return an empty string. */
   return (char*)calloc(1, 4);
#endif
}
1051
1052 static void
1053 dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
1054 {
1055 struct pipe_context *pipe = dctx->pipe;
1056 struct dd_draw_record *record;
1057 char *log;
1058
1059 /* Make a record of the draw call. */
1060 record = MALLOC_STRUCT(dd_draw_record);
1061 if (!record)
1062 return;
1063
1064 /* Create the log. */
1065 log = dd_get_driver_shader_log(dctx);
1066 if (!log) {
1067 FREE(record);
1068 return;
1069 }
1070
1071 /* Update the fence with the GPU.
1072 *
1073 * radeonsi/clear_buffer waits in the command processor until shaders are
1074 * idle before writing to memory. That's a necessary condition for isolating
1075 * draw calls.
1076 */
1077 dctx->sequence_no++;
1078 pipe->clear_buffer(pipe, dctx->fence, 0, 4, &dctx->sequence_no, 4);
1079
1080 /* Initialize the record. */
1081 record->timestamp = os_time_get();
1082 record->sequence_no = dctx->sequence_no;
1083 record->driver_state_log = log;
1084
1085 memset(&record->call, 0, sizeof(record->call));
1086 dd_copy_call(&record->call, call);
1087
1088 dd_init_copy_of_draw_state(&record->draw_state);
1089 dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
1090
1091 /* Add the record to the list. */
1092 mtx_lock(&dctx->mutex);
1093 record->next = dctx->records;
1094 dctx->records = record;
1095 mtx_unlock(&dctx->mutex);
1096 }
1097
1098 static void
1099 dd_context_flush(struct pipe_context *_pipe,
1100 struct pipe_fence_handle **fence, unsigned flags)
1101 {
1102 struct dd_context *dctx = dd_context(_pipe);
1103 struct pipe_context *pipe = dctx->pipe;
1104
1105 switch (dd_screen(dctx->base.screen)->mode) {
1106 case DD_DETECT_HANGS:
1107 dd_flush_and_handle_hang(dctx, fence, flags,
1108 "GPU hang detected in pipe->flush()");
1109 break;
1110 case DD_DETECT_HANGS_PIPELINED: /* nothing to do here */
1111 case DD_DUMP_ALL_CALLS:
1112 case DD_DUMP_APITRACE_CALL:
1113 pipe->flush(pipe, fence, flags);
1114 break;
1115 default:
1116 assert(0);
1117 }
1118 }
1119
1120 static void
1121 dd_before_draw(struct dd_context *dctx)
1122 {
1123 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1124
1125 if (dscreen->mode == DD_DETECT_HANGS &&
1126 !dscreen->no_flush &&
1127 dctx->num_draw_calls >= dscreen->skip_count)
1128 dd_flush_and_handle_hang(dctx, NULL, 0,
1129 "GPU hang most likely caused by internal "
1130 "driver commands");
1131 }
1132
1133 static void
1134 dd_after_draw(struct dd_context *dctx, struct dd_call *call)
1135 {
1136 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1137 struct pipe_context *pipe = dctx->pipe;
1138
1139 if (dctx->num_draw_calls >= dscreen->skip_count) {
1140 switch (dscreen->mode) {
1141 case DD_DETECT_HANGS:
1142 if (!dscreen->no_flush &&
1143 dd_flush_and_check_hang(dctx, NULL, 0)) {
1144 dd_write_report(dctx, call,
1145 PIPE_DUMP_DEVICE_STATUS_REGISTERS |
1146 PIPE_DUMP_CURRENT_STATES |
1147 PIPE_DUMP_CURRENT_SHADERS |
1148 PIPE_DUMP_LAST_COMMAND_BUFFER,
1149 true);
1150
1151 /* Terminate the process to prevent future hangs. */
1152 dd_kill_process();
1153 }
1154 break;
1155 case DD_DETECT_HANGS_PIPELINED:
1156 dd_pipelined_process_draw(dctx, call);
1157 break;
1158 case DD_DUMP_ALL_CALLS:
1159 if (!dscreen->no_flush)
1160 pipe->flush(pipe, NULL, 0);
1161 dd_write_report(dctx, call,
1162 PIPE_DUMP_CURRENT_STATES |
1163 PIPE_DUMP_CURRENT_SHADERS |
1164 PIPE_DUMP_LAST_COMMAND_BUFFER,
1165 false);
1166 break;
1167 case DD_DUMP_APITRACE_CALL:
1168 if (dscreen->apitrace_dump_call ==
1169 dctx->draw_state.apitrace_call_number) {
1170 dd_write_report(dctx, call,
1171 PIPE_DUMP_CURRENT_STATES |
1172 PIPE_DUMP_CURRENT_SHADERS,
1173 false);
1174 /* No need to continue. */
1175 exit(0);
1176 }
1177 break;
1178 default:
1179 assert(0);
1180 }
1181 }
1182
1183 ++dctx->num_draw_calls;
1184 if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
1185 fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
1186 dctx->num_draw_calls);
1187 }
1188
1189 static void
1190 dd_context_draw_vbo(struct pipe_context *_pipe,
1191 const struct pipe_draw_info *info)
1192 {
1193 struct dd_context *dctx = dd_context(_pipe);
1194 struct pipe_context *pipe = dctx->pipe;
1195 struct dd_call call;
1196
1197 call.type = CALL_DRAW_VBO;
1198 call.info.draw_vbo.draw = *info;
1199 if (info->indirect) {
1200 call.info.draw_vbo.indirect = *info->indirect;
1201 call.info.draw_vbo.draw.indirect = &call.info.draw_vbo.indirect;
1202 } else {
1203 memset(&call.info.draw_vbo.indirect, 0, sizeof(*info->indirect));
1204 }
1205
1206 dd_before_draw(dctx);
1207 pipe->draw_vbo(pipe, info);
1208 dd_after_draw(dctx, &call);
1209 }
1210
1211 static void
1212 dd_context_launch_grid(struct pipe_context *_pipe,
1213 const struct pipe_grid_info *info)
1214 {
1215 struct dd_context *dctx = dd_context(_pipe);
1216 struct pipe_context *pipe = dctx->pipe;
1217 struct dd_call call;
1218
1219 call.type = CALL_LAUNCH_GRID;
1220 call.info.launch_grid = *info;
1221
1222 dd_before_draw(dctx);
1223 pipe->launch_grid(pipe, info);
1224 dd_after_draw(dctx, &call);
1225 }
1226
1227 static void
1228 dd_context_resource_copy_region(struct pipe_context *_pipe,
1229 struct pipe_resource *dst, unsigned dst_level,
1230 unsigned dstx, unsigned dsty, unsigned dstz,
1231 struct pipe_resource *src, unsigned src_level,
1232 const struct pipe_box *src_box)
1233 {
1234 struct dd_context *dctx = dd_context(_pipe);
1235 struct pipe_context *pipe = dctx->pipe;
1236 struct dd_call call;
1237
1238 call.type = CALL_RESOURCE_COPY_REGION;
1239 call.info.resource_copy_region.dst = dst;
1240 call.info.resource_copy_region.dst_level = dst_level;
1241 call.info.resource_copy_region.dstx = dstx;
1242 call.info.resource_copy_region.dsty = dsty;
1243 call.info.resource_copy_region.dstz = dstz;
1244 call.info.resource_copy_region.src = src;
1245 call.info.resource_copy_region.src_level = src_level;
1246 call.info.resource_copy_region.src_box = *src_box;
1247
1248 dd_before_draw(dctx);
1249 pipe->resource_copy_region(pipe,
1250 dst, dst_level, dstx, dsty, dstz,
1251 src, src_level, src_box);
1252 dd_after_draw(dctx, &call);
1253 }
1254
1255 static void
1256 dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1257 {
1258 struct dd_context *dctx = dd_context(_pipe);
1259 struct pipe_context *pipe = dctx->pipe;
1260 struct dd_call call;
1261
1262 call.type = CALL_BLIT;
1263 call.info.blit = *info;
1264
1265 dd_before_draw(dctx);
1266 pipe->blit(pipe, info);
1267 dd_after_draw(dctx, &call);
1268 }
1269
1270 static boolean
1271 dd_context_generate_mipmap(struct pipe_context *_pipe,
1272 struct pipe_resource *res,
1273 enum pipe_format format,
1274 unsigned base_level,
1275 unsigned last_level,
1276 unsigned first_layer,
1277 unsigned last_layer)
1278 {
1279 struct dd_context *dctx = dd_context(_pipe);
1280 struct pipe_context *pipe = dctx->pipe;
1281 struct dd_call call;
1282 boolean result;
1283
1284 call.type = CALL_GENERATE_MIPMAP;
1285 call.info.generate_mipmap.res = res;
1286 call.info.generate_mipmap.format = format;
1287 call.info.generate_mipmap.base_level = base_level;
1288 call.info.generate_mipmap.last_level = last_level;
1289 call.info.generate_mipmap.first_layer = first_layer;
1290 call.info.generate_mipmap.last_layer = last_layer;
1291
1292 dd_before_draw(dctx);
1293 result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1294 first_layer, last_layer);
1295 dd_after_draw(dctx, &call);
1296 return result;
1297 }
1298
1299 static void
1300 dd_context_flush_resource(struct pipe_context *_pipe,
1301 struct pipe_resource *resource)
1302 {
1303 struct dd_context *dctx = dd_context(_pipe);
1304 struct pipe_context *pipe = dctx->pipe;
1305 struct dd_call call;
1306
1307 call.type = CALL_FLUSH_RESOURCE;
1308 call.info.flush_resource = resource;
1309
1310 dd_before_draw(dctx);
1311 pipe->flush_resource(pipe, resource);
1312 dd_after_draw(dctx, &call);
1313 }
1314
1315 static void
1316 dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
1317 const union pipe_color_union *color, double depth,
1318 unsigned stencil)
1319 {
1320 struct dd_context *dctx = dd_context(_pipe);
1321 struct pipe_context *pipe = dctx->pipe;
1322 struct dd_call call;
1323
1324 call.type = CALL_CLEAR;
1325 call.info.clear.buffers = buffers;
1326 call.info.clear.color = *color;
1327 call.info.clear.depth = depth;
1328 call.info.clear.stencil = stencil;
1329
1330 dd_before_draw(dctx);
1331 pipe->clear(pipe, buffers, color, depth, stencil);
1332 dd_after_draw(dctx, &call);
1333 }
1334
1335 static void
1336 dd_context_clear_render_target(struct pipe_context *_pipe,
1337 struct pipe_surface *dst,
1338 const union pipe_color_union *color,
1339 unsigned dstx, unsigned dsty,
1340 unsigned width, unsigned height,
1341 bool render_condition_enabled)
1342 {
1343 struct dd_context *dctx = dd_context(_pipe);
1344 struct pipe_context *pipe = dctx->pipe;
1345 struct dd_call call;
1346
1347 call.type = CALL_CLEAR_RENDER_TARGET;
1348
1349 dd_before_draw(dctx);
1350 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
1351 render_condition_enabled);
1352 dd_after_draw(dctx, &call);
1353 }
1354
1355 static void
1356 dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1357 struct pipe_surface *dst, unsigned clear_flags,
1358 double depth, unsigned stencil, unsigned dstx,
1359 unsigned dsty, unsigned width, unsigned height,
1360 bool render_condition_enabled)
1361 {
1362 struct dd_context *dctx = dd_context(_pipe);
1363 struct pipe_context *pipe = dctx->pipe;
1364 struct dd_call call;
1365
1366 call.type = CALL_CLEAR_DEPTH_STENCIL;
1367
1368 dd_before_draw(dctx);
1369 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1370 dstx, dsty, width, height,
1371 render_condition_enabled);
1372 dd_after_draw(dctx, &call);
1373 }
1374
1375 static void
1376 dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1377 unsigned offset, unsigned size,
1378 const void *clear_value, int clear_value_size)
1379 {
1380 struct dd_context *dctx = dd_context(_pipe);
1381 struct pipe_context *pipe = dctx->pipe;
1382 struct dd_call call;
1383
1384 call.type = CALL_CLEAR_BUFFER;
1385 call.info.clear_buffer.res = res;
1386 call.info.clear_buffer.offset = offset;
1387 call.info.clear_buffer.size = size;
1388 call.info.clear_buffer.clear_value = clear_value;
1389 call.info.clear_buffer.clear_value_size = clear_value_size;
1390
1391 dd_before_draw(dctx);
1392 pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1393 dd_after_draw(dctx, &call);
1394 }
1395
1396 static void
1397 dd_context_clear_texture(struct pipe_context *_pipe,
1398 struct pipe_resource *res,
1399 unsigned level,
1400 const struct pipe_box *box,
1401 const void *data)
1402 {
1403 struct dd_context *dctx = dd_context(_pipe);
1404 struct pipe_context *pipe = dctx->pipe;
1405 struct dd_call call;
1406
1407 call.type = CALL_CLEAR_TEXTURE;
1408
1409 dd_before_draw(dctx);
1410 pipe->clear_texture(pipe, res, level, box, data);
1411 dd_after_draw(dctx, &call);
1412 }
1413
1414 void
1415 dd_init_draw_functions(struct dd_context *dctx)
1416 {
1417 CTX_INIT(flush);
1418 CTX_INIT(draw_vbo);
1419 CTX_INIT(launch_grid);
1420 CTX_INIT(resource_copy_region);
1421 CTX_INIT(blit);
1422 CTX_INIT(clear);
1423 CTX_INIT(clear_render_target);
1424 CTX_INIT(clear_depth_stencil);
1425 CTX_INIT(clear_buffer);
1426 CTX_INIT(clear_texture);
1427 CTX_INIT(flush_resource);
1428 CTX_INIT(generate_mipmap);
1429 }