aec9332d903c1badbc50782dd7128276b491a3ac
[mesa.git] / src / gallium / drivers / ddebug / dd_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "dd_pipe.h"
29
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "os/os_time.h"
39 #include <inttypes.h>
40
41
/* Open the debug-dump stream and write a header identifying the application,
 * driver and device, plus (optionally) the last apitrace call number.
 *
 * Returns NULL if the debug file could not be opened. On success the caller
 * owns the stream and must close it (see dd_close_file_stream).
 */
FILE *
dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
{
   struct pipe_screen *screen = dscreen->screen;
   char cmd_line[4096];

   FILE *f = dd_get_debug_file(dscreen->verbose);
   if (!f)
      return NULL;

   /* The command line identifies which process produced this dump. */
   if (os_get_command_line(cmd_line, sizeof(cmd_line)))
      fprintf(f, "Command: %s\n", cmd_line);
   fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
   fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
   fprintf(f, "Device name: %s\n\n", screen->get_name(screen));

   /* 0 means "no apitrace call number available"; nothing is printed then. */
   if (apitrace_call_number)
      fprintf(f, "Last apitrace call: %u\n\n",
              apitrace_call_number);
   return f;
}
63
/* Append the most recent kernel log lines to the dump file.
 * Best effort: if the "dmesg" pipeline cannot be spawned, nothing happens.
 */
static void
dd_dump_dmesg(FILE *f)
{
   FILE *cmd = popen("dmesg | tail -n60", "r");
   char buf[2000];

   if (!cmd)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(buf, sizeof(buf), cmd) != NULL)
      fputs(buf, f);

   pclose(cmd);
}
79
/* Close a stream previously returned by dd_get_file_stream. */
static void
dd_close_file_stream(FILE *stream)
{
   fclose(stream);
}
85
/* Return how many viewports the currently bound shaders can address.
 *
 * The last geometry-processing stage (GS, else TES, else VS) decides:
 * if it writes the viewport index, all PIPE_MAX_VIEWPORTS are considered
 * active; otherwise only viewport 0 is used.
 */
static unsigned
dd_num_active_viewports(struct dd_draw_state *dstate)
{
   struct tgsi_shader_info info;
   const struct tgsi_token *tokens;

   if (dstate->shaders[PIPE_SHADER_GEOMETRY])
      tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
      tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_VERTEX])
      tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
   else
      return 1;

   /* tokens may be NULL (dd_copy_draw_state clears it for non-TGSI IR);
    * fall back to a single viewport in that case. */
   if (tokens) {
      tgsi_scan_shader(tokens, &info);
      if (info.writes_viewport_index)
         return PIPE_MAX_VIEWPORTS;
   }

   return 1;
}
109
/* ANSI terminal escape sequences used to colorize the state dump. */
#define COLOR_RESET "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE "\033[1;33m"

/* Dump a named state object via the matching util_dump_<name> helper.
 * All of these expect a local "FILE *f" in the calling scope. */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Same as DUMP, but prints an index (for per-slot state like samplers). */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump one member of a structure, indented below its parent. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, "  " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Like DUMP_M, but passes the member's address (for by-pointer dumpers). */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, "  " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)
137
/* Print "name = value" (decimal) with the name highlighted. */
static void
print_named_value(FILE *f, const char *name, int value)
{
   fprintf(f, COLOR_STATE "%s" COLOR_RESET " = %i\n", name, value);
}
143
144 static void
145 print_named_xvalue(FILE *f, const char *name, int value)
146 {
147 fprintf(f, COLOR_STATE "%s" COLOR_RESET " = 0x%08x\n", name, value);
148 }
149
/* Print an unsigned integer in decimal (util_dump_* naming convention,
 * so it can be used by the DUMP* macros). */
static void
util_dump_uint(FILE *f, unsigned value)
{
   fprintf(f, "%u", value);
}
155
/* Print a signed integer in decimal (util_dump_* naming convention). */
static void
util_dump_int(FILE *f, int value)
{
   fprintf(f, "%d", value);
}
161
/* Print an unsigned integer in "0x..." hexadecimal form. */
static void
util_dump_hex(FILE *f, unsigned value)
{
   fprintf(f, "0x%x", value);
}
167
/* Print a double with %f's default six decimal places. */
static void
util_dump_double(FILE *f, double value)
{
   fprintf(f, "%f", value);
}
173
/* Print the symbolic name of a pipe_format. */
static void
util_dump_format(FILE *f, enum pipe_format format)
{
   fprintf(f, "%s", util_format_name(format));
}
179
180 static void
181 util_dump_color_union(FILE *f, const union pipe_color_union *color)
182 {
183 fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
184 color->f[0], color->f[1], color->f[2], color->f[3],
185 color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
186 }
187
/* Dump conditional-rendering state, if a render-condition query is bound. */
static void
dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
{
   if (dstate->render_cond.query) {
      fprintf(f, "render condition:\n");
      DUMP_M(query_type, &dstate->render_cond, query->type);
      DUMP_M(uint, &dstate->render_cond, condition);
      DUMP_M(uint, &dstate->render_cond, mode);
      fprintf(f, "\n");
   }
}
199
/* Dump all state belonging to one shader stage.
 *
 * Some fixed-function state is attached to a specific stage so that it
 * appears once in the dump: tessellation default levels are printed with
 * TESS_CTRL (when the app uses a TES without binding a TCS), and
 * rasterizer/viewport/scissor/clip state is printed with FRAGMENT.
 */
static void
dd_dump_shader(struct dd_draw_state *dstate, enum pipe_shader_type sh, FILE *f)
{
   int i;
   const char *shader_str[PIPE_SHADER_TYPES];

   shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
   shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
   shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
   shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
   shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
   shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";

   /* Default tess levels only apply when there is no TCS bound. */
   if (sh == PIPE_SHADER_TESS_CTRL &&
       !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
       dstate->shaders[PIPE_SHADER_TESS_EVAL])
      fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
              "default_inner_level = {%f, %f}}\n",
              dstate->tess_default_levels[0],
              dstate->tess_default_levels[1],
              dstate->tess_default_levels[2],
              dstate->tess_default_levels[3],
              dstate->tess_default_levels[4],
              dstate->tess_default_levels[5]);

   if (sh == PIPE_SHADER_FRAGMENT)
      if (dstate->rs) {
         unsigned num_viewports = dd_num_active_viewports(dstate);

         if (dstate->rs->state.rs.clip_plane_enable)
            DUMP(clip_state, &dstate->clip_state);

         for (i = 0; i < num_viewports; i++)
            DUMP_I(viewport_state, &dstate->viewports[i], i);

         /* Scissors only matter when the rasterizer enables them. */
         if (dstate->rs->state.rs.scissor)
            for (i = 0; i < num_viewports; i++)
               DUMP_I(scissor_state, &dstate->scissors[i], i);

         DUMP(rasterizer_state, &dstate->rs->state.rs);

         if (dstate->rs->state.rs.poly_stipple_enable)
            DUMP(poly_stipple, &dstate->polygon_stipple);
         fprintf(f, "\n");
      }

   if (!dstate->shaders[sh])
      return;

   fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
   DUMP(shader_state, &dstate->shaders[sh]->state.shader);

   /* Only dump slots that are actually bound. */
   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
      if (dstate->constant_buffers[sh][i].buffer ||
          dstate->constant_buffers[sh][i].user_buffer) {
         DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
         if (dstate->constant_buffers[sh][i].buffer)
            DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
      }

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
      if (dstate->sampler_states[sh][i])
         DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
      if (dstate->sampler_views[sh][i]) {
         DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
         DUMP_M(resource, dstate->sampler_views[sh][i], texture);
      }

   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
      if (dstate->shader_images[sh][i].resource) {
         DUMP_I(image_view, &dstate->shader_images[sh][i], i);
         if (dstate->shader_images[sh][i].resource)
            DUMP_M(resource, &dstate->shader_images[sh][i], resource);
      }

   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
      if (dstate->shader_buffers[sh][i].buffer) {
         DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
         if (dstate->shader_buffers[sh][i].buffer)
            DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
      }

   fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
}
286
/* Dump a draw call and all graphics state that may affect it:
 * draw parameters, render condition, vertex buffers/elements, stream
 * output, every non-compute shader stage, and the output-merger and
 * framebuffer state.
 */
static void
dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
{
   int sh, i;

   DUMP(draw_info, info);
   if (info->count_from_stream_output)
      DUMP_M(stream_output_target, info,
             count_from_stream_output);
   if (info->indirect) {
      DUMP_M(resource, info, indirect->buffer);
      if (info->indirect->indirect_draw_count)
         DUMP_M(resource, info, indirect->indirect_draw_count);
   }

   fprintf(f, "\n");

   /* TODO: dump active queries */

   dd_dump_render_condition(dstate, f);

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      if (dstate->vertex_buffers[i].buffer.resource) {
         DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
         /* User buffers have no pipe_resource to dump. */
         if (!dstate->vertex_buffers[i].is_user_buffer)
            DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
      }

   if (dstate->velems) {
      print_named_value(f, "num vertex elements",
                        dstate->velems->state.velems.count);
      for (i = 0; i < dstate->velems->state.velems.count; i++) {
         fprintf(f, "  ");
         DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
      }
   }

   print_named_value(f, "num stream output targets", dstate->num_so_targets);
   for (i = 0; i < dstate->num_so_targets; i++)
      if (dstate->so_targets[i]) {
         DUMP_I(stream_output_target, dstate->so_targets[i], i);
         DUMP_M(resource, dstate->so_targets[i], buffer);
         fprintf(f, "  offset = %i\n", dstate->so_offsets[i]);
      }

   fprintf(f, "\n");
   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      /* Compute state is dumped by dd_dump_launch_grid instead. */
      if (sh == PIPE_SHADER_COMPUTE)
         continue;

      dd_dump_shader(dstate, sh, f);
   }

   if (dstate->dsa)
      DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
   DUMP(stencil_ref, &dstate->stencil_ref);

   if (dstate->blend)
      DUMP(blend_state, &dstate->blend->state.blend);
   DUMP(blend_color, &dstate->blend_color);

   print_named_value(f, "min_samples", dstate->min_samples);
   print_named_xvalue(f, "sample_mask", dstate->sample_mask);
   fprintf(f, "\n");

   DUMP(framebuffer_state, &dstate->framebuffer_state);
   for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
      if (dstate->framebuffer_state.cbufs[i]) {
         fprintf(f, "  " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n    ", i);
         DUMP(surface, dstate->framebuffer_state.cbufs[i]);
         fprintf(f, "    ");
         DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
      }
   if (dstate->framebuffer_state.zsbuf) {
      fprintf(f, "  " COLOR_STATE "zsbuf:" COLOR_RESET "\n    ");
      DUMP(surface, dstate->framebuffer_state.zsbuf);
      fprintf(f, "    ");
      DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
   }
   fprintf(f, "\n");
}
368
/* Dump a compute dispatch and the bound compute-stage state.
 * "__func__ + 8" skips the "dd_dump_" prefix, printing just "launch_grid". */
static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(grid_info, info);
   fprintf(f, "\n");

   dd_dump_shader(dstate, PIPE_SHADER_COMPUTE, f);
   fprintf(f, "\n");
}
379
/* Dump the parameters of a resource_copy_region call
 * (__func__ + 8 skips the "dd_dump_" prefix). */
static void
dd_dump_resource_copy_region(struct dd_draw_state *dstate,
                             struct call_resource_copy_region *info,
                             FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst);
   DUMP_M(uint, info, dst_level);
   DUMP_M(uint, info, dstx);
   DUMP_M(uint, info, dsty);
   DUMP_M(uint, info, dstz);
   DUMP_M(resource, info, src);
   DUMP_M(uint, info, src_level);
   DUMP_M_ADDR(box, info, src_box);
}
395
/* Dump the parameters of a blit call, including the render condition when
 * the blit honors it. */
static void
dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst.resource);
   DUMP_M(uint, info, dst.level);
   DUMP_M_ADDR(box, info, dst.box);
   DUMP_M(format, info, dst.format);

   DUMP_M(resource, info, src.resource);
   DUMP_M(uint, info, src.level);
   DUMP_M_ADDR(box, info, src.box);
   DUMP_M(format, info, src.format);

   DUMP_M(hex, info, mask);
   DUMP_M(uint, info, filter);
   DUMP_M(uint, info, scissor_enable);
   DUMP_M_ADDR(scissor_state, info, scissor);
   DUMP_M(uint, info, render_condition_enable);

   if (info->render_condition_enable)
      dd_dump_render_condition(dstate, f);
}
419
/* Stub: only prints the call name for now (__func__ + 8 skips "dd_dump_"). */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
426
/* Dump the parameters of a get_query_result_resource call. */
static void
dd_dump_get_query_result_resource(struct call_get_query_result_resource *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__ + 8);
   DUMP_M(query_type, info, query_type);
   DUMP_M(uint, info, wait);
   DUMP_M(query_value_type, info, result_type);
   DUMP_M(int, info, index);
   DUMP_M(resource, info, resource);
   DUMP_M(uint, info, offset);
}
438
/* Dump the resource passed to a flush_resource call. */
static void
dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
                       FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(resource, res);
}
446
/* Dump the parameters of a clear call (buffers mask, color, depth, stencil). */
static void
dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(uint, info, buffers);
   DUMP_M_ADDR(color_union, info, color);
   DUMP_M(double, info, depth);
   DUMP_M(hex, info, stencil);
}
456
457 static void
458 dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
459 FILE *f)
460 {
461 int i;
462 const char *value = (const char*)info->clear_value;
463
464 fprintf(f, "%s:\n", __func__+8);
465 DUMP_M(resource, info, res);
466 DUMP_M(uint, info, offset);
467 DUMP_M(uint, info, size);
468 DUMP_M(uint, info, clear_value_size);
469
470 fprintf(f, " clear_value:");
471 for (i = 0; i < info->clear_value_size; i++)
472 fprintf(f, " %02x", value[i]);
473 fprintf(f, "\n");
474 }
475
/* Stub: only prints the call name for now. */
static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
482
/* Stub: only prints the call name for now. */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
489
/* Stub: only prints the call name for now. */
static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
496
/* Append driver-private debug state to the dump, if the driver implements
 * the optional dump_debug_state hook. */
static void
dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
{
   if (dctx->pipe->dump_debug_state) {
	   fprintf(f,"\n\n**************************************************"
		   "***************************\n");
	   fprintf(f, "Driver-specific state:\n\n");
	   dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
   }
}
507
/* Dispatch to the per-call-type dump routine.
 * No default case: the switch covers every dd_call_type, so the compiler
 * can warn if a new type is added without a dump handler. */
static void
dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
{
   switch (call->type) {
   case CALL_DRAW_VBO:
      dd_dump_draw_vbo(state, &call->info.draw_vbo.draw, f);
      break;
   case CALL_LAUNCH_GRID:
      dd_dump_launch_grid(state, &call->info.launch_grid, f);
      break;
   case CALL_RESOURCE_COPY_REGION:
      dd_dump_resource_copy_region(state,
                                   &call->info.resource_copy_region, f);
      break;
   case CALL_BLIT:
      dd_dump_blit(state, &call->info.blit, f);
      break;
   case CALL_FLUSH_RESOURCE:
      dd_dump_flush_resource(state, call->info.flush_resource, f);
      break;
   case CALL_CLEAR:
      dd_dump_clear(state, &call->info.clear, f);
      break;
   case CALL_CLEAR_BUFFER:
      dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
      break;
   case CALL_CLEAR_TEXTURE:
      dd_dump_clear_texture(state, f);
      break;
   case CALL_CLEAR_RENDER_TARGET:
      dd_dump_clear_render_target(state, f);
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      dd_dump_clear_depth_stencil(state, f);
      break;
   case CALL_GENERATE_MIPMAP:
      dd_dump_generate_mipmap(state, f);
      break;
   case CALL_GET_QUERY_RESULT_RESOURCE:
      dd_dump_get_query_result_resource(&call->info.get_query_result_resource, f);
      break;
   }
}
551
/* Write a complete debug report for one call: the call parameters and
 * current state, driver-private state, the context log, and (optionally)
 * recent dmesg output. Used by the non-pipelined modes. */
static void
dd_write_report(struct dd_context *dctx, struct dd_call *call, unsigned flags,
                bool dump_dmesg)
{
   FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                dctx->draw_state.apitrace_call_number);

   if (!f)
      return;

   dd_dump_call(f, &dctx->draw_state, call);
   dd_dump_driver_state(dctx, f, flags);

   fprintf(f,"\n\n**************************************************"
           "***************************\n");
   fprintf(f, "Context Log:\n\n");
   u_log_new_page_print(&dctx->log, f);

   if (dump_dmesg)
      dd_dump_dmesg(f);
   dd_close_file_stream(f);
}
574
/* Flush all pending output and terminate the process. Called after a GPU
 * hang has been dumped, to prevent the app from hanging again. */
static void
dd_kill_process(void)
{
   /* Flush dirty pages to disk so the log files survive a wedged machine. */
   sync();
   fprintf(stderr, "dd: Aborting the process...\n");
   fflush(stdout);
   fflush(stderr);
   exit(1);
}
584
/* Flush the context and wait up to the configured timeout for the fence.
 *
 * Returns true if the GPU did not become idle in time (hang detected).
 * If flush_fence is non-NULL, a reference to the flush fence is returned
 * through it. Returns false when the driver produced no fence at all.
 */
static bool
dd_flush_and_check_hang(struct dd_context *dctx,
                        struct pipe_fence_handle **flush_fence,
                        unsigned flush_flags)
{
   struct pipe_fence_handle *fence = NULL;
   struct pipe_context *pipe = dctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   uint64_t timeout_ms = dd_screen(dctx->base.screen)->timeout_ms;
   bool idle;

   assert(timeout_ms > 0);

   pipe->flush(pipe, &fence, flush_flags);
   if (flush_fence)
      screen->fence_reference(screen, flush_fence, fence);
   if (!fence)
      return false;

   /* fence_finish takes nanoseconds; timeout_ms is milliseconds. */
   idle = screen->fence_finish(screen, pipe, fence, timeout_ms * 1000000);
   screen->fence_reference(screen, &fence, NULL);
   if (!idle)
      fprintf(stderr, "dd: GPU hang detected!\n");
   return !idle;
}
610
/* Flush and, if a hang is detected, dump driver status registers and dmesg
 * to the debug file and kill the process. "cause" describes the call site
 * for the report. */
static void
dd_flush_and_handle_hang(struct dd_context *dctx,
                         struct pipe_fence_handle **fence, unsigned flags,
                         const char *cause)
{
   if (dd_flush_and_check_hang(dctx, fence, flags)) {
      FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                   dctx->draw_state.apitrace_call_number);

      if (f) {
         fprintf(f, "dd: %s.\n", cause);
         dd_dump_driver_state(dctx, f,
                              PIPE_DUMP_DEVICE_STATUS_REGISTERS);
         dd_dump_dmesg(f);
         dd_close_file_stream(f);
      }

      /* Terminate the process to prevent future hangs. */
      dd_kill_process();
   }
}
632
/* Drop all gallium-object references held by a call copy made with
 * dd_copy_call. The copy itself is not freed. */
static void
dd_unreference_copy_of_call(struct dd_call *dst)
{
   switch (dst->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
      /* The index union holds a resource only for non-user index buffers. */
      if (dst->info.draw_vbo.draw.index_size &&
          !dst->info.draw_vbo.draw.has_user_indices)
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
      else
         dst->info.draw_vbo.draw.index.user = NULL;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
      pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
      pipe_resource_reference(&dst->info.blit.src.resource, NULL);
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource, NULL);
      break;
   case CALL_CLEAR:
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
      break;
   case CALL_GET_QUERY_RESULT_RESOURCE:
      pipe_resource_reference(&dst->info.get_query_result_resource.resource, NULL);
      break;
   }
}
680
/* Deep-copy a call into a record, taking references on all gallium objects
 * so the copy stays valid after the original call returns.
 *
 * dst may hold a previous copy; its old references are released/replaced
 * via pipe_*_reference before the struct assignment overwrites the
 * pointers.
 */
static void
dd_copy_call(struct dd_call *dst, struct dd_call *src)
{
   dst->type = src->type;

   switch (src->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output,
                               src->info.draw_vbo.draw.count_from_stream_output);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer,
                              src->info.draw_vbo.indirect.buffer);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count,
                              src->info.draw_vbo.indirect.indirect_draw_count);

      /* Release the index-buffer reference held by the OLD dst contents
       * (dst->... is examined before it is overwritten below). */
      if (dst->info.draw_vbo.draw.index_size &&
          !dst->info.draw_vbo.draw.has_user_indices)
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
      else
         dst->info.draw_vbo.draw.index.user = NULL;

      /* Take a reference on the new index buffer, if it is a resource. */
      if (src->info.draw_vbo.draw.index_size &&
          !src->info.draw_vbo.draw.has_user_indices) {
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource,
                                 src->info.draw_vbo.draw.index.resource);
      }

      dst->info.draw_vbo = src->info.draw_vbo;
      /* Make the copied indirect pointer refer to the copy's own storage,
       * not to the caller's transient struct. */
      if (!src->info.draw_vbo.draw.indirect)
         dst->info.draw_vbo.draw.indirect = NULL;
      else
         dst->info.draw_vbo.draw.indirect = &dst->info.draw_vbo.indirect;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect,
                              src->info.launch_grid.indirect);
      dst->info.launch_grid = src->info.launch_grid;
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst,
                              src->info.resource_copy_region.dst);
      pipe_resource_reference(&dst->info.resource_copy_region.src,
                              src->info.resource_copy_region.src);
      dst->info.resource_copy_region = src->info.resource_copy_region;
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource,
                              src->info.blit.dst.resource);
      pipe_resource_reference(&dst->info.blit.src.resource,
                              src->info.blit.src.resource);
      dst->info.blit = src->info.blit;
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource,
                              src->info.flush_resource);
      break;
   case CALL_CLEAR:
      dst->info.clear = src->info.clear;
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res,
                              src->info.clear_buffer.res);
      dst->info.clear_buffer = src->info.clear_buffer;
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res,
                              src->info.generate_mipmap.res);
      dst->info.generate_mipmap = src->info.generate_mipmap;
      break;
   case CALL_GET_QUERY_RESULT_RESOURCE:
      pipe_resource_reference(&dst->info.get_query_result_resource.resource,
                              src->info.get_query_result_resource.resource);
      dst->info.get_query_result_resource = src->info.get_query_result_resource;
      /* The query object itself is not copied; it may be destroyed by the
       * application before the record is dumped. */
      dst->info.get_query_result_resource.query = NULL;
      break;
   }
}
763
/* Prepare a draw-state copy for use by dd_copy_draw_state.
 *
 * Only pointers to gallium objects are cleared; CSO members are pointed
 * at storage embedded in the copy so dd_copy_draw_state can copy CSO
 * contents by value.
 */
static void
dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   unsigned i,j;

   /* Just clear pointers to gallium objects. Don't clear the whole structure,
    * because it would kill performance with its size of 130 KB.
    */
   memset(state->base.vertex_buffers, 0,
          sizeof(state->base.vertex_buffers));
   memset(state->base.so_targets, 0,
          sizeof(state->base.so_targets));
   memset(state->base.constant_buffers, 0,
          sizeof(state->base.constant_buffers));
   memset(state->base.sampler_views, 0,
          sizeof(state->base.sampler_views));
   memset(state->base.shader_images, 0,
          sizeof(state->base.shader_images));
   memset(state->base.shader_buffers, 0,
          sizeof(state->base.shader_buffers));
   memset(&state->base.framebuffer_state, 0,
          sizeof(state->base.framebuffer_state));

   memset(state->shaders, 0, sizeof(state->shaders));

   /* Point the base state's CSO pointers at the copy's own storage. */
   state->base.render_cond.query = &state->render_cond;

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      state->base.shaders[i] = &state->shaders[i];
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         state->base.sampler_states[i][j] = &state->sampler_states[i][j];
   }

   state->base.velems = &state->velems;
   state->base.rs = &state->rs;
   state->base.dsa = &state->dsa;
   state->base.blend = &state->blend;
}
802
/* Release everything held by a draw-state copy: gallium-object references,
 * duplicated TGSI tokens, and the framebuffer state. */
static void
dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   struct dd_draw_state *dst = &state->base;
   unsigned i,j;

   for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
      pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
   for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
      pipe_so_target_reference(&dst->so_targets[i], NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* Free the tokens duplicated by dd_copy_draw_state. */
      if (dst->shaders[i])
         tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);

      for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
         pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
      for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
         pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
      for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
         pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
   }

   util_unreference_framebuffer_state(&dst->framebuffer_state);
}
830
831 static void
832 dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
833 {
834 unsigned i,j;
835
836 if (src->render_cond.query) {
837 *dst->render_cond.query = *src->render_cond.query;
838 dst->render_cond.condition = src->render_cond.condition;
839 dst->render_cond.mode = src->render_cond.mode;
840 } else {
841 dst->render_cond.query = NULL;
842 }
843
844 for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
845 pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
846 &src->vertex_buffers[i]);
847 }
848
849 dst->num_so_targets = src->num_so_targets;
850 for (i = 0; i < ARRAY_SIZE(src->so_targets); i++)
851 pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
852 memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
853
854 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
855 if (!src->shaders[i]) {
856 dst->shaders[i] = NULL;
857 continue;
858 }
859
860 if (src->shaders[i]) {
861 dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
862 if (src->shaders[i]->state.shader.tokens) {
863 dst->shaders[i]->state.shader.tokens =
864 tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
865 } else {
866 dst->shaders[i]->state.shader.ir.nir = NULL;
867 }
868 } else {
869 dst->shaders[i] = NULL;
870 }
871
872 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
873 pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
874 src->constant_buffers[i][j].buffer);
875 memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
876 sizeof(src->constant_buffers[i][j]));
877 }
878
879 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
880 pipe_sampler_view_reference(&dst->sampler_views[i][j],
881 src->sampler_views[i][j]);
882 if (src->sampler_states[i][j])
883 dst->sampler_states[i][j]->state.sampler =
884 src->sampler_states[i][j]->state.sampler;
885 else
886 dst->sampler_states[i][j] = NULL;
887 }
888
889 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
890 pipe_resource_reference(&dst->shader_images[i][j].resource,
891 src->shader_images[i][j].resource);
892 memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
893 sizeof(src->shader_images[i][j]));
894 }
895
896 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
897 pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
898 src->shader_buffers[i][j].buffer);
899 memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
900 sizeof(src->shader_buffers[i][j]));
901 }
902 }
903
904 if (src->velems)
905 dst->velems->state.velems = src->velems->state.velems;
906 else
907 dst->velems = NULL;
908
909 if (src->rs)
910 dst->rs->state.rs = src->rs->state.rs;
911 else
912 dst->rs = NULL;
913
914 if (src->dsa)
915 dst->dsa->state.dsa = src->dsa->state.dsa;
916 else
917 dst->dsa = NULL;
918
919 if (src->blend)
920 dst->blend->state.blend = src->blend->state.blend;
921 else
922 dst->blend = NULL;
923
924 dst->blend_color = src->blend_color;
925 dst->stencil_ref = src->stencil_ref;
926 dst->sample_mask = src->sample_mask;
927 dst->min_samples = src->min_samples;
928 dst->clip_state = src->clip_state;
929 util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
930 memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
931 memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
932 memcpy(dst->tess_default_levels, src->tess_default_levels,
933 sizeof(src->tess_default_levels));
934 dst->apitrace_call_number = src->apitrace_call_number;
935 }
936
/* Free the record at *record and advance *record to the next list entry. */
static void
dd_free_record(struct dd_draw_record **record)
{
   struct dd_draw_record *next = (*record)->next;

   u_log_page_destroy((*record)->log_page);
   dd_unreference_copy_of_call(&(*record)->call);
   dd_unreference_copy_of_draw_state(&(*record)->draw_state);
   FREE(*record);
   *record = next;
}
948
949 static void
950 dd_dump_record(struct dd_context *dctx, struct dd_draw_record *record,
951 uint32_t hw_sequence_no, int64_t now)
952 {
953 FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
954 record->draw_state.base.apitrace_call_number);
955 if (!f)
956 return;
957
958 fprintf(f, "Draw call sequence # = %u\n", record->sequence_no);
959 fprintf(f, "HW reached sequence # = %u\n", hw_sequence_no);
960 fprintf(f, "Elapsed time = %"PRIi64" ms\n\n",
961 (now - record->timestamp) / 1000);
962
963 dd_dump_call(f, &record->draw_state.base, &record->call);
964
965 fprintf(f,"\n\n**************************************************"
966 "***************************\n");
967 fprintf(f, "Context Log:\n\n");
968 u_log_page_print(record->log_page, f);
969
970 dctx->pipe->dump_debug_state(dctx->pipe, f,
971 PIPE_DUMP_DEVICE_STATUS_REGISTERS);
972 dd_dump_dmesg(f);
973 fclose(f);
974 }
975
/* Entry point of the hang-detection thread (DD_DETECT_HANGS_PIPELINED mode).
 *
 * Every 10 ms, walks the list of recorded draw calls while holding
 * dctx->mutex:
 *  - records whose sequence number the GPU fence has reached (and all
 *    older ones) are freed;
 *  - a record that exceeds the timeout is treated as a GPU hang: the
 *    oldest unsignalled draw is dumped and the process is killed.
 *
 * On dctx->kill_thread, frees remaining records and returns 0.
 */
int
dd_thread_pipelined_hang_detect(void *input)
{
   struct dd_context *dctx = (struct dd_context *)input;
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);

   mtx_lock(&dctx->mutex);

   while (!dctx->kill_thread) {
      struct dd_draw_record **record = &dctx->records;

      /* Loop over all records. */
      while (*record) {
         int64_t now;

         /* If the fence has been signalled, release the record and all older
          * records.
          */
         if (*dctx->mapped_fence >= (*record)->sequence_no) {
            while (*record)
               dd_free_record(record);
            break;
         }

         /* The fence hasn't been signalled. Check the timeout. */
         now = os_time_get();
         if (os_time_timeout((*record)->timestamp,
                             (*record)->timestamp + dscreen->timeout_ms * 1000,
                             now)) {
            fprintf(stderr, "GPU hang detected.\n");

            /* Get the oldest unsignalled draw call. */
            while ((*record)->next &&
                   *dctx->mapped_fence < (*record)->next->sequence_no)
               record = &(*record)->next;

            dd_dump_record(dctx, *record, *dctx->mapped_fence, now);
            dd_kill_process();
         }

         record = &(*record)->next;
      }

      /* Unlock and sleep before starting all over again. */
      mtx_unlock(&dctx->mutex);
      os_time_sleep(10000); /* 10 ms */
      mtx_lock(&dctx->mutex);
   }

   /* Thread termination. */
   while (dctx->records)
      dd_free_record(&dctx->records);

   mtx_unlock(&dctx->mutex);
   return 0;
}
1032
1033 static void
1034 dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
1035 {
1036 struct pipe_context *pipe = dctx->pipe;
1037 struct dd_draw_record *record;
1038
1039 /* Make a record of the draw call. */
1040 record = MALLOC_STRUCT(dd_draw_record);
1041 if (!record)
1042 return;
1043
1044 /* Update the fence with the GPU.
1045 *
1046 * radeonsi/clear_buffer waits in the command processor until shaders are
1047 * idle before writing to memory. That's a necessary condition for isolating
1048 * draw calls.
1049 */
1050 dctx->sequence_no++;
1051 pipe->clear_buffer(pipe, dctx->fence, 0, 4, &dctx->sequence_no, 4);
1052
1053 /* Initialize the record. */
1054 record->timestamp = os_time_get();
1055 record->sequence_no = dctx->sequence_no;
1056 record->log_page = u_log_new_page(&dctx->log);
1057
1058 memset(&record->call, 0, sizeof(record->call));
1059 dd_copy_call(&record->call, call);
1060
1061 dd_init_copy_of_draw_state(&record->draw_state);
1062 dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
1063
1064 /* Add the record to the list. */
1065 mtx_lock(&dctx->mutex);
1066 record->next = dctx->records;
1067 dctx->records = record;
1068 mtx_unlock(&dctx->mutex);
1069 }
1070
1071 static void
1072 dd_context_flush(struct pipe_context *_pipe,
1073 struct pipe_fence_handle **fence, unsigned flags)
1074 {
1075 struct dd_context *dctx = dd_context(_pipe);
1076 struct pipe_context *pipe = dctx->pipe;
1077
1078 switch (dd_screen(dctx->base.screen)->mode) {
1079 case DD_DETECT_HANGS:
1080 dd_flush_and_handle_hang(dctx, fence, flags,
1081 "GPU hang detected in pipe->flush()");
1082 break;
1083 case DD_DETECT_HANGS_PIPELINED: /* nothing to do here */
1084 case DD_DUMP_ALL_CALLS:
1085 case DD_DUMP_APITRACE_CALL:
1086 pipe->flush(pipe, fence, flags);
1087 break;
1088 default:
1089 assert(0);
1090 }
1091 }
1092
1093 static void
1094 dd_before_draw(struct dd_context *dctx)
1095 {
1096 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1097
1098 if (dscreen->mode == DD_DETECT_HANGS &&
1099 !dscreen->no_flush &&
1100 dctx->num_draw_calls >= dscreen->skip_count)
1101 dd_flush_and_handle_hang(dctx, NULL, 0,
1102 "GPU hang most likely caused by internal "
1103 "driver commands");
1104 }
1105
1106 static void
1107 dd_after_draw(struct dd_context *dctx, struct dd_call *call)
1108 {
1109 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1110 struct pipe_context *pipe = dctx->pipe;
1111
1112 if (dctx->num_draw_calls >= dscreen->skip_count) {
1113 switch (dscreen->mode) {
1114 case DD_DETECT_HANGS:
1115 if (!dscreen->no_flush &&
1116 dd_flush_and_check_hang(dctx, NULL, 0)) {
1117 dd_write_report(dctx, call,
1118 PIPE_DUMP_DEVICE_STATUS_REGISTERS,
1119 true);
1120
1121 /* Terminate the process to prevent future hangs. */
1122 dd_kill_process();
1123 } else {
1124 u_log_page_destroy(u_log_new_page(&dctx->log));
1125 }
1126 break;
1127 case DD_DETECT_HANGS_PIPELINED:
1128 dd_pipelined_process_draw(dctx, call);
1129 break;
1130 case DD_DUMP_ALL_CALLS:
1131 if (!dscreen->no_flush)
1132 pipe->flush(pipe, NULL, 0);
1133 dd_write_report(dctx, call, 0, false);
1134 break;
1135 case DD_DUMP_APITRACE_CALL:
1136 if (dscreen->apitrace_dump_call ==
1137 dctx->draw_state.apitrace_call_number) {
1138 dd_write_report(dctx, call, 0, false);
1139 /* No need to continue. */
1140 exit(0);
1141 } else {
1142 u_log_page_destroy(u_log_new_page(&dctx->log));
1143 }
1144 break;
1145 default:
1146 assert(0);
1147 }
1148 }
1149
1150 ++dctx->num_draw_calls;
1151 if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
1152 fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
1153 dctx->num_draw_calls);
1154 }
1155
1156 static void
1157 dd_context_draw_vbo(struct pipe_context *_pipe,
1158 const struct pipe_draw_info *info)
1159 {
1160 struct dd_context *dctx = dd_context(_pipe);
1161 struct pipe_context *pipe = dctx->pipe;
1162 struct dd_call call;
1163
1164 call.type = CALL_DRAW_VBO;
1165 call.info.draw_vbo.draw = *info;
1166 if (info->indirect) {
1167 call.info.draw_vbo.indirect = *info->indirect;
1168 call.info.draw_vbo.draw.indirect = &call.info.draw_vbo.indirect;
1169 } else {
1170 memset(&call.info.draw_vbo.indirect, 0, sizeof(*info->indirect));
1171 }
1172
1173 dd_before_draw(dctx);
1174 pipe->draw_vbo(pipe, info);
1175 dd_after_draw(dctx, &call);
1176 }
1177
1178 static void
1179 dd_context_launch_grid(struct pipe_context *_pipe,
1180 const struct pipe_grid_info *info)
1181 {
1182 struct dd_context *dctx = dd_context(_pipe);
1183 struct pipe_context *pipe = dctx->pipe;
1184 struct dd_call call;
1185
1186 call.type = CALL_LAUNCH_GRID;
1187 call.info.launch_grid = *info;
1188
1189 dd_before_draw(dctx);
1190 pipe->launch_grid(pipe, info);
1191 dd_after_draw(dctx, &call);
1192 }
1193
1194 static void
1195 dd_context_resource_copy_region(struct pipe_context *_pipe,
1196 struct pipe_resource *dst, unsigned dst_level,
1197 unsigned dstx, unsigned dsty, unsigned dstz,
1198 struct pipe_resource *src, unsigned src_level,
1199 const struct pipe_box *src_box)
1200 {
1201 struct dd_context *dctx = dd_context(_pipe);
1202 struct pipe_context *pipe = dctx->pipe;
1203 struct dd_call call;
1204
1205 call.type = CALL_RESOURCE_COPY_REGION;
1206 call.info.resource_copy_region.dst = dst;
1207 call.info.resource_copy_region.dst_level = dst_level;
1208 call.info.resource_copy_region.dstx = dstx;
1209 call.info.resource_copy_region.dsty = dsty;
1210 call.info.resource_copy_region.dstz = dstz;
1211 call.info.resource_copy_region.src = src;
1212 call.info.resource_copy_region.src_level = src_level;
1213 call.info.resource_copy_region.src_box = *src_box;
1214
1215 dd_before_draw(dctx);
1216 pipe->resource_copy_region(pipe,
1217 dst, dst_level, dstx, dsty, dstz,
1218 src, src_level, src_box);
1219 dd_after_draw(dctx, &call);
1220 }
1221
1222 static void
1223 dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1224 {
1225 struct dd_context *dctx = dd_context(_pipe);
1226 struct pipe_context *pipe = dctx->pipe;
1227 struct dd_call call;
1228
1229 call.type = CALL_BLIT;
1230 call.info.blit = *info;
1231
1232 dd_before_draw(dctx);
1233 pipe->blit(pipe, info);
1234 dd_after_draw(dctx, &call);
1235 }
1236
1237 static boolean
1238 dd_context_generate_mipmap(struct pipe_context *_pipe,
1239 struct pipe_resource *res,
1240 enum pipe_format format,
1241 unsigned base_level,
1242 unsigned last_level,
1243 unsigned first_layer,
1244 unsigned last_layer)
1245 {
1246 struct dd_context *dctx = dd_context(_pipe);
1247 struct pipe_context *pipe = dctx->pipe;
1248 struct dd_call call;
1249 boolean result;
1250
1251 call.type = CALL_GENERATE_MIPMAP;
1252 call.info.generate_mipmap.res = res;
1253 call.info.generate_mipmap.format = format;
1254 call.info.generate_mipmap.base_level = base_level;
1255 call.info.generate_mipmap.last_level = last_level;
1256 call.info.generate_mipmap.first_layer = first_layer;
1257 call.info.generate_mipmap.last_layer = last_layer;
1258
1259 dd_before_draw(dctx);
1260 result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1261 first_layer, last_layer);
1262 dd_after_draw(dctx, &call);
1263 return result;
1264 }
1265
1266 static void
1267 dd_context_get_query_result_resource(struct pipe_context *_pipe,
1268 struct pipe_query *query,
1269 boolean wait,
1270 enum pipe_query_value_type result_type,
1271 int index,
1272 struct pipe_resource *resource,
1273 unsigned offset)
1274 {
1275 struct dd_context *dctx = dd_context(_pipe);
1276 struct dd_query *dquery = dd_query(query);
1277 struct pipe_context *pipe = dctx->pipe;
1278 struct dd_call call;
1279
1280 call.type = CALL_GET_QUERY_RESULT_RESOURCE;
1281 call.info.get_query_result_resource.query = query;
1282 call.info.get_query_result_resource.wait = wait;
1283 call.info.get_query_result_resource.result_type = result_type;
1284 call.info.get_query_result_resource.index = index;
1285 call.info.get_query_result_resource.resource = resource;
1286 call.info.get_query_result_resource.offset = offset;
1287
1288 /* In pipelined mode, the query may be deleted by the time we need to
1289 * print it.
1290 */
1291 call.info.get_query_result_resource.query_type = dquery->type;
1292
1293 dd_before_draw(dctx);
1294 pipe->get_query_result_resource(pipe, dquery->query, wait,
1295 result_type, index, resource, offset);
1296 dd_after_draw(dctx, &call);
1297 }
1298
1299 static void
1300 dd_context_flush_resource(struct pipe_context *_pipe,
1301 struct pipe_resource *resource)
1302 {
1303 struct dd_context *dctx = dd_context(_pipe);
1304 struct pipe_context *pipe = dctx->pipe;
1305 struct dd_call call;
1306
1307 call.type = CALL_FLUSH_RESOURCE;
1308 call.info.flush_resource = resource;
1309
1310 dd_before_draw(dctx);
1311 pipe->flush_resource(pipe, resource);
1312 dd_after_draw(dctx, &call);
1313 }
1314
1315 static void
1316 dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
1317 const union pipe_color_union *color, double depth,
1318 unsigned stencil)
1319 {
1320 struct dd_context *dctx = dd_context(_pipe);
1321 struct pipe_context *pipe = dctx->pipe;
1322 struct dd_call call;
1323
1324 call.type = CALL_CLEAR;
1325 call.info.clear.buffers = buffers;
1326 call.info.clear.color = *color;
1327 call.info.clear.depth = depth;
1328 call.info.clear.stencil = stencil;
1329
1330 dd_before_draw(dctx);
1331 pipe->clear(pipe, buffers, color, depth, stencil);
1332 dd_after_draw(dctx, &call);
1333 }
1334
1335 static void
1336 dd_context_clear_render_target(struct pipe_context *_pipe,
1337 struct pipe_surface *dst,
1338 const union pipe_color_union *color,
1339 unsigned dstx, unsigned dsty,
1340 unsigned width, unsigned height,
1341 bool render_condition_enabled)
1342 {
1343 struct dd_context *dctx = dd_context(_pipe);
1344 struct pipe_context *pipe = dctx->pipe;
1345 struct dd_call call;
1346
1347 call.type = CALL_CLEAR_RENDER_TARGET;
1348
1349 dd_before_draw(dctx);
1350 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
1351 render_condition_enabled);
1352 dd_after_draw(dctx, &call);
1353 }
1354
1355 static void
1356 dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1357 struct pipe_surface *dst, unsigned clear_flags,
1358 double depth, unsigned stencil, unsigned dstx,
1359 unsigned dsty, unsigned width, unsigned height,
1360 bool render_condition_enabled)
1361 {
1362 struct dd_context *dctx = dd_context(_pipe);
1363 struct pipe_context *pipe = dctx->pipe;
1364 struct dd_call call;
1365
1366 call.type = CALL_CLEAR_DEPTH_STENCIL;
1367
1368 dd_before_draw(dctx);
1369 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1370 dstx, dsty, width, height,
1371 render_condition_enabled);
1372 dd_after_draw(dctx, &call);
1373 }
1374
1375 static void
1376 dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1377 unsigned offset, unsigned size,
1378 const void *clear_value, int clear_value_size)
1379 {
1380 struct dd_context *dctx = dd_context(_pipe);
1381 struct pipe_context *pipe = dctx->pipe;
1382 struct dd_call call;
1383
1384 call.type = CALL_CLEAR_BUFFER;
1385 call.info.clear_buffer.res = res;
1386 call.info.clear_buffer.offset = offset;
1387 call.info.clear_buffer.size = size;
1388 call.info.clear_buffer.clear_value = clear_value;
1389 call.info.clear_buffer.clear_value_size = clear_value_size;
1390
1391 dd_before_draw(dctx);
1392 pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1393 dd_after_draw(dctx, &call);
1394 }
1395
1396 static void
1397 dd_context_clear_texture(struct pipe_context *_pipe,
1398 struct pipe_resource *res,
1399 unsigned level,
1400 const struct pipe_box *box,
1401 const void *data)
1402 {
1403 struct dd_context *dctx = dd_context(_pipe);
1404 struct pipe_context *pipe = dctx->pipe;
1405 struct dd_call call;
1406
1407 call.type = CALL_CLEAR_TEXTURE;
1408
1409 dd_before_draw(dctx);
1410 pipe->clear_texture(pipe, res, level, box, data);
1411 dd_after_draw(dctx, &call);
1412 }
1413
/* Install the dd_context_* wrappers for every draw/clear/copy/query entry
 * point of the context. CTX_INIT(name) is a macro from dd_pipe.h that hooks
 * dctx->base.name up to the matching dd_context_<name> wrapper defined in
 * this file — NOTE(review): exact expansion assumed; confirm in dd_pipe.h.
 */
void
dd_init_draw_functions(struct dd_context *dctx)
{
   CTX_INIT(flush);
   CTX_INIT(draw_vbo);
   CTX_INIT(launch_grid);
   CTX_INIT(resource_copy_region);
   CTX_INIT(blit);
   CTX_INIT(clear);
   CTX_INIT(clear_render_target);
   CTX_INIT(clear_depth_stencil);
   CTX_INIT(clear_buffer);
   CTX_INIT(clear_texture);
   CTX_INIT(flush_resource);
   CTX_INIT(generate_mipmap);
   CTX_INIT(get_query_result_resource);
}