a25017114d8ac4e1c0beeb54b81e755c2c8f5a18
[mesa.git] / src / gallium / drivers / ddebug / dd_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "dd_pipe.h"
29
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "util/os_time.h"
39 #include <inttypes.h>
40
41
/* Write the standard dump-file header: the process command line (when the OS
 * can provide it), driver/device identification strings queried from the
 * screen, and the last apitrace call number if one was recorded (non-zero). */
static void
dd_write_header(FILE *f, struct pipe_screen *screen, unsigned apitrace_call_number)
{
   char cmd_line[4096];
   if (os_get_command_line(cmd_line, sizeof(cmd_line)))
      fprintf(f, "Command: %s\n", cmd_line);
   fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
   fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
   fprintf(f, "Device name: %s\n\n", screen->get_name(screen));

   /* 0 means "no apitrace call number known". */
   if (apitrace_call_number)
      fprintf(f, "Last apitrace call: %u\n\n", apitrace_call_number);
}
55
56 FILE *
57 dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
58 {
59 struct pipe_screen *screen = dscreen->screen;
60
61 FILE *f = dd_get_debug_file(dscreen->verbose);
62 if (!f)
63 return NULL;
64
65 dd_write_header(f, screen, apitrace_call_number);
66 return f;
67 }
68
/* Append the last 60 lines of the kernel log (via "dmesg | tail") to the
 * dump file. Silently does nothing if the pipe cannot be opened. */
static void
dd_dump_dmesg(FILE *f)
{
   FILE *p = popen("dmesg | tail -n60", "r");
   char line[2000];

   if (p == NULL)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(line, sizeof(line), p) != NULL)
      fputs(line, f);
   pclose(p);
}
84
/* Return how many viewports the bound shaders can address: PIPE_MAX_VIEWPORTS
 * if the last pre-rasterizer stage (GS, else TES, else VS) writes the
 * viewport index, otherwise 1. */
static unsigned
dd_num_active_viewports(struct dd_draw_state *dstate)
{
   struct tgsi_shader_info info;
   const struct tgsi_token *tokens;

   /* Pick the shader whose outputs actually reach the rasterizer. */
   if (dstate->shaders[PIPE_SHADER_GEOMETRY])
      tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
      tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_VERTEX])
      tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
   else
      return 1;

   /* tokens can be NULL — dd_copy_draw_state leaves it unset when the
    * shader has no TGSI representation. */
   if (tokens) {
      tgsi_scan_shader(tokens, &info);
      if (info.writes_viewport_index)
         return PIPE_MAX_VIEWPORTS;
   }

   return 1;
}
108
/* ANSI terminal escape sequences used to colorize the dump output. */
#define COLOR_RESET "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE "\033[1;33m"

/* Dump a named state object with the matching util_dump_<name> helper.
 * All of these macros expect a FILE *f in the enclosing scope. */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Like DUMP, but labels the output with an array index i. */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump one member of a structure, passing the member by value. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Like DUMP_M, but passes the member's address to the dump helper. */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Print "name = value" with the name highlighted. */
#define PRINT_NAMED(type, name, value) \
do { \
   fprintf(f, COLOR_STATE "%s" COLOR_RESET " = ", name); \
   util_dump_##type(f, value); \
   fprintf(f, "\n"); \
} while (0)
143
/* Print an unsigned integer in decimal. */
static void
util_dump_uint(FILE *f, unsigned i)
{
   fprintf(f, "%lu", (unsigned long)i);
}
149
/* Print a signed integer in decimal. */
static void
util_dump_int(FILE *f, int i)
{
   fprintf(f, "%i", i);
}
155
/* Print an unsigned integer in hexadecimal with a "0x" prefix. */
static void
util_dump_hex(FILE *f, unsigned i)
{
   fputs("0x", f);
   fprintf(f, "%x", i);
}
161
/* Print a double with printf's default six decimal places. */
static void
util_dump_double(FILE *f, double d)
{
   fprintf(f, "%lf", d);
}
167
/* Print the symbolic name of a pipe_format. */
static void
util_dump_format(FILE *f, enum pipe_format format)
{
   fprintf(f, "%s", util_format_name(format));
}
173
174 static void
175 util_dump_color_union(FILE *f, const union pipe_color_union *color)
176 {
177 fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
178 color->f[0], color->f[1], color->f[2], color->f[3],
179 color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
180 }
181
/* Dump the active conditional-rendering query (type, condition, mode),
 * or nothing if no render condition is set. */
static void
dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
{
   if (dstate->render_cond.query) {
      fprintf(f, "render condition:\n");
      DUMP_M(query_type, &dstate->render_cond, query->type);
      DUMP_M(uint, &dstate->render_cond, condition);
      DUMP_M(uint, &dstate->render_cond, mode);
      fprintf(f, "\n");
   }
}
193
/* Dump everything bound to one shader stage: the shader itself plus its
 * constant buffers, sampler states, sampler views, images and SSBOs.
 * Two stage-specific extras:
 *  - TESS_CTRL: if no TCS is bound but a TES is, print the default
 *    tessellation levels that apply in that case;
 *  - FRAGMENT: also dump rasterizer-related state (clip state, viewports,
 *    scissors, rasterizer state, polygon stipple).
 * If no shader is bound to the stage, only the extras above are printed. */
static void
dd_dump_shader(struct dd_draw_state *dstate, enum pipe_shader_type sh, FILE *f)
{
   int i;
   const char *shader_str[PIPE_SHADER_TYPES];

   shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
   shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
   shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
   shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
   shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
   shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";

   /* Default tess levels only matter when tessellating without a TCS. */
   if (sh == PIPE_SHADER_TESS_CTRL &&
       !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
       dstate->shaders[PIPE_SHADER_TESS_EVAL])
      fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
              "default_inner_level = {%f, %f}}\n",
              dstate->tess_default_levels[0],
              dstate->tess_default_levels[1],
              dstate->tess_default_levels[2],
              dstate->tess_default_levels[3],
              dstate->tess_default_levels[4],
              dstate->tess_default_levels[5]);

   if (sh == PIPE_SHADER_FRAGMENT)
      if (dstate->rs) {
         unsigned num_viewports = dd_num_active_viewports(dstate);

         if (dstate->rs->state.rs.clip_plane_enable)
            DUMP(clip_state, &dstate->clip_state);

         for (i = 0; i < num_viewports; i++)
            DUMP_I(viewport_state, &dstate->viewports[i], i);

         /* Scissors are only applied when the rasterizer enables them. */
         if (dstate->rs->state.rs.scissor)
            for (i = 0; i < num_viewports; i++)
               DUMP_I(scissor_state, &dstate->scissors[i], i);

         DUMP(rasterizer_state, &dstate->rs->state.rs);

         if (dstate->rs->state.rs.poly_stipple_enable)
            DUMP(poly_stipple, &dstate->polygon_stipple);
         fprintf(f, "\n");
      }

   if (!dstate->shaders[sh])
      return;

   fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
   DUMP(shader_state, &dstate->shaders[sh]->state.shader);

   /* Only dump slots that actually have something bound. */
   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
      if (dstate->constant_buffers[sh][i].buffer ||
            dstate->constant_buffers[sh][i].user_buffer) {
         DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
         if (dstate->constant_buffers[sh][i].buffer)
            DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
      }

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
      if (dstate->sampler_states[sh][i])
         DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
      if (dstate->sampler_views[sh][i]) {
         DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
         DUMP_M(resource, dstate->sampler_views[sh][i], texture);
      }

   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
      if (dstate->shader_images[sh][i].resource) {
         DUMP_I(image_view, &dstate->shader_images[sh][i], i);
         if (dstate->shader_images[sh][i].resource)
            DUMP_M(resource, &dstate->shader_images[sh][i], resource);
      }

   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
      if (dstate->shader_buffers[sh][i].buffer) {
         DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
         if (dstate->shader_buffers[sh][i].buffer)
            DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
      }

   fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
}
280
/* Dump the complete pipeline state for a draw call: the draw parameters,
 * render condition, vertex buffers/elements, stream-output targets, every
 * non-compute shader stage, and the fixed-function state (DSA, blend,
 * sample state, framebuffer). */
static void
dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
{
   int sh, i;

   DUMP(draw_info, info);
   if (info->count_from_stream_output)
      DUMP_M(stream_output_target, info,
             count_from_stream_output);
   if (info->indirect) {
      DUMP_M(resource, info, indirect->buffer);
      if (info->indirect->indirect_draw_count)
         DUMP_M(resource, info, indirect->indirect_draw_count);
   }

   fprintf(f, "\n");

   /* TODO: dump active queries */

   dd_dump_render_condition(dstate, f);

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      if (dstate->vertex_buffers[i].buffer.resource) {
         DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
         /* User buffers have no pipe_resource to dump. */
         if (!dstate->vertex_buffers[i].is_user_buffer)
            DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
      }

   if (dstate->velems) {
      PRINT_NAMED(uint, "num vertex elements",
                  dstate->velems->state.velems.count);
      for (i = 0; i < dstate->velems->state.velems.count; i++) {
         fprintf(f, " ");
         DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
      }
   }

   PRINT_NAMED(uint, "num stream output targets", dstate->num_so_targets);
   for (i = 0; i < dstate->num_so_targets; i++)
      if (dstate->so_targets[i]) {
         DUMP_I(stream_output_target, dstate->so_targets[i], i);
         DUMP_M(resource, dstate->so_targets[i], buffer);
         fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
      }

   fprintf(f, "\n");
   /* Compute state is irrelevant for a draw; see dd_dump_launch_grid. */
   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (sh == PIPE_SHADER_COMPUTE)
         continue;

      dd_dump_shader(dstate, sh, f);
   }

   if (dstate->dsa)
      DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
   DUMP(stencil_ref, &dstate->stencil_ref);

   if (dstate->blend)
      DUMP(blend_state, &dstate->blend->state.blend);
   DUMP(blend_color, &dstate->blend_color);

   PRINT_NAMED(uint, "min_samples", dstate->min_samples);
   PRINT_NAMED(hex, "sample_mask", dstate->sample_mask);
   fprintf(f, "\n");

   DUMP(framebuffer_state, &dstate->framebuffer_state);
   for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
      if (dstate->framebuffer_state.cbufs[i]) {
         fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
         DUMP(surface, dstate->framebuffer_state.cbufs[i]);
         fprintf(f, " ");
         DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
      }
   if (dstate->framebuffer_state.zsbuf) {
      fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
      DUMP(surface, dstate->framebuffer_state.zsbuf);
      fprintf(f, " ");
      DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
   }
   fprintf(f, "\n");
}
362
/* Dump a compute dispatch: the grid parameters and the compute shader state.
 * __func__+8 skips the "dd_dump_" prefix (8 chars), printing "launch_grid:". */
static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(grid_info, info);
   fprintf(f, "\n");

   dd_dump_shader(dstate, PIPE_SHADER_COMPUTE, f);
   fprintf(f, "\n");
}
373
/* Dump the parameters of a resource_copy_region call.
 * __func__+8 strips the "dd_dump_" prefix from the label. */
static void
dd_dump_resource_copy_region(struct dd_draw_state *dstate,
                             struct call_resource_copy_region *info,
                             FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst);
   DUMP_M(uint, info, dst_level);
   DUMP_M(uint, info, dstx);
   DUMP_M(uint, info, dsty);
   DUMP_M(uint, info, dstz);
   DUMP_M(resource, info, src);
   DUMP_M(uint, info, src_level);
   DUMP_M_ADDR(box, info, src_box);
}
389
/* Dump the parameters of a blit call: destination, source, mask/filter/
 * scissor settings, and the render condition when the blit honors it. */
static void
dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst.resource);
   DUMP_M(uint, info, dst.level);
   DUMP_M_ADDR(box, info, dst.box);
   DUMP_M(format, info, dst.format);

   DUMP_M(resource, info, src.resource);
   DUMP_M(uint, info, src.level);
   DUMP_M_ADDR(box, info, src.box);
   DUMP_M(format, info, src.format);

   DUMP_M(hex, info, mask);
   DUMP_M(uint, info, filter);
   DUMP_M(uint, info, scissor_enable);
   DUMP_M_ADDR(scissor_state, info, scissor);
   DUMP_M(uint, info, render_condition_enable);

   if (info->render_condition_enable)
      dd_dump_render_condition(dstate, f);
}
413
/* Placeholder: only prints the call name; parameters are not dumped yet. */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
420
/* Dump the parameters of a get_query_result_resource call. */
static void
dd_dump_get_query_result_resource(struct call_get_query_result_resource *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__ + 8);
   DUMP_M(query_type, info, query_type);
   DUMP_M(uint, info, wait);
   DUMP_M(query_value_type, info, result_type);
   DUMP_M(int, info, index);
   DUMP_M(resource, info, resource);
   DUMP_M(uint, info, offset);
}
432
/* Dump the resource passed to a flush_resource call. */
static void
dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
                       FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(resource, res);
}
440
/* Dump the parameters of a clear call: buffer mask, clear color,
 * depth and stencil values. */
static void
dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(uint, info, buffers);
   DUMP_M_ADDR(color_union, info, color);
   DUMP_M(double, info, depth);
   DUMP_M(hex, info, stencil);
}
450
451 static void
452 dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
453 FILE *f)
454 {
455 int i;
456 const char *value = (const char*)info->clear_value;
457
458 fprintf(f, "%s:\n", __func__+8);
459 DUMP_M(resource, info, res);
460 DUMP_M(uint, info, offset);
461 DUMP_M(uint, info, size);
462 DUMP_M(uint, info, clear_value_size);
463
464 fprintf(f, " clear_value:");
465 for (i = 0; i < info->clear_value_size; i++)
466 fprintf(f, " %02x", value[i]);
467 fprintf(f, "\n");
468 }
469
/* Dump the parameters and results of a transfer_map call. */
static void
dd_dump_transfer_map(struct call_transfer_map *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M_ADDR(transfer, info, transfer);
   DUMP_M(ptr, info, transfer_ptr);
   DUMP_M(ptr, info, ptr);
}
478
/* Dump the parameters of a transfer_flush_region call. */
static void
dd_dump_transfer_flush_region(struct call_transfer_flush_region *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M_ADDR(transfer, info, transfer);
   DUMP_M(ptr, info, transfer_ptr);
   DUMP_M_ADDR(box, info, box);
}
487
/* Dump the parameters of a transfer_unmap call. */
static void
dd_dump_transfer_unmap(struct call_transfer_unmap *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M_ADDR(transfer, info, transfer);
   DUMP_M(ptr, info, transfer_ptr);
}
495
/* Dump the parameters of a buffer_subdata call. */
static void
dd_dump_buffer_subdata(struct call_buffer_subdata *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, resource);
   DUMP_M(transfer_usage, info, usage);
   DUMP_M(uint, info, offset);
   DUMP_M(uint, info, size);
   DUMP_M(ptr, info, data);
}
506
/* Dump the parameters of a texture_subdata call. */
static void
dd_dump_texture_subdata(struct call_texture_subdata *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, resource);
   DUMP_M(uint, info, level);
   DUMP_M(transfer_usage, info, usage);
   DUMP_M_ADDR(box, info, box);
   DUMP_M(ptr, info, data);
   DUMP_M(uint, info, stride);
   DUMP_M(uint, info, layer_stride);
}
519
/* Placeholder: only prints the call name; parameters are not dumped yet. */
static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
526
/* Placeholder: only prints the call name; parameters are not dumped yet. */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
533
/* Placeholder: only prints the call name; parameters are not dumped yet. */
static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
540
541 static void
542 dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
543 {
544 if (dctx->pipe->dump_debug_state) {
545 fprintf(f,"\n\n**************************************************"
546 "***************************\n");
547 fprintf(f, "Driver-specific state:\n\n");
548 dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
549 }
550 }
551
/* Dispatch on the recorded call type and invoke the matching dump helper.
 * The draw state snapshot is only needed by call types that depend on
 * bound pipeline state (draws, grids, blits, clears). */
static void
dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
{
   switch (call->type) {
   case CALL_DRAW_VBO:
      dd_dump_draw_vbo(state, &call->info.draw_vbo.draw, f);
      break;
   case CALL_LAUNCH_GRID:
      dd_dump_launch_grid(state, &call->info.launch_grid, f);
      break;
   case CALL_RESOURCE_COPY_REGION:
      dd_dump_resource_copy_region(state,
                                   &call->info.resource_copy_region, f);
      break;
   case CALL_BLIT:
      dd_dump_blit(state, &call->info.blit, f);
      break;
   case CALL_FLUSH_RESOURCE:
      dd_dump_flush_resource(state, call->info.flush_resource, f);
      break;
   case CALL_CLEAR:
      dd_dump_clear(state, &call->info.clear, f);
      break;
   case CALL_CLEAR_BUFFER:
      dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
      break;
   case CALL_CLEAR_TEXTURE:
      dd_dump_clear_texture(state, f);
      break;
   case CALL_CLEAR_RENDER_TARGET:
      dd_dump_clear_render_target(state, f);
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      dd_dump_clear_depth_stencil(state, f);
      break;
   case CALL_GENERATE_MIPMAP:
      dd_dump_generate_mipmap(state, f);
      break;
   case CALL_GET_QUERY_RESULT_RESOURCE:
      dd_dump_get_query_result_resource(&call->info.get_query_result_resource, f);
      break;
   case CALL_TRANSFER_MAP:
      dd_dump_transfer_map(&call->info.transfer_map, f);
      break;
   case CALL_TRANSFER_FLUSH_REGION:
      dd_dump_transfer_flush_region(&call->info.transfer_flush_region, f);
      break;
   case CALL_TRANSFER_UNMAP:
      dd_dump_transfer_unmap(&call->info.transfer_unmap, f);
      break;
   case CALL_BUFFER_SUBDATA:
      dd_dump_buffer_subdata(&call->info.buffer_subdata, f);
      break;
   case CALL_TEXTURE_SUBDATA:
      dd_dump_texture_subdata(&call->info.texture_subdata, f);
      break;
   }
}
610
/* Abort the process after a detected hang. sync() and the fflush calls
 * make sure the dump files and diagnostic output reach disk/terminal
 * before exit(1) terminates the process. */
static void
dd_kill_process(void)
{
   sync();
   fprintf(stderr, "dd: Aborting the process...\n");
   fflush(stdout);
   fflush(stderr);
   exit(1);
}
620
/* Drop all references held by a recorded call snapshot (the counterpart of
 * the per-call reference-taking done when the call was recorded). Only the
 * members relevant to the call's type hold references. */
static void
dd_unreference_copy_of_call(struct dd_call *dst)
{
   switch (dst->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
      /* Index buffers are only pipe_resources for non-user indexed draws. */
      if (dst->info.draw_vbo.draw.index_size &&
          !dst->info.draw_vbo.draw.has_user_indices)
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
      else
         dst->info.draw_vbo.draw.index.user = NULL;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
      pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
      pipe_resource_reference(&dst->info.blit.src.resource, NULL);
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource, NULL);
      break;
   case CALL_CLEAR:
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
      break;
   case CALL_GET_QUERY_RESULT_RESOURCE:
      pipe_resource_reference(&dst->info.get_query_result_resource.resource, NULL);
      break;
   case CALL_TRANSFER_MAP:
      pipe_resource_reference(&dst->info.transfer_map.transfer.resource, NULL);
      break;
   case CALL_TRANSFER_FLUSH_REGION:
      pipe_resource_reference(&dst->info.transfer_flush_region.transfer.resource, NULL);
      break;
   case CALL_TRANSFER_UNMAP:
      pipe_resource_reference(&dst->info.transfer_unmap.transfer.resource, NULL);
      break;
   case CALL_BUFFER_SUBDATA:
      pipe_resource_reference(&dst->info.buffer_subdata.resource, NULL);
      break;
   case CALL_TEXTURE_SUBDATA:
      pipe_resource_reference(&dst->info.texture_subdata.resource, NULL);
      break;
   }
}
683
/* Prepare a draw-state snapshot for reuse: null out only the pointers to
 * gallium objects, and point the snapshot's indirection members (shaders,
 * sampler states, velems, rs, dsa, blend, render_cond) at the storage
 * embedded in the snapshot itself, so dd_copy_draw_state can copy by value. */
static void
dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   unsigned i,j;

   /* Just clear pointers to gallium objects. Don't clear the whole structure,
    * because it would kill performance with its size of 130 KB.
    */
   memset(state->base.vertex_buffers, 0,
          sizeof(state->base.vertex_buffers));
   memset(state->base.so_targets, 0,
          sizeof(state->base.so_targets));
   memset(state->base.constant_buffers, 0,
          sizeof(state->base.constant_buffers));
   memset(state->base.sampler_views, 0,
          sizeof(state->base.sampler_views));
   memset(state->base.shader_images, 0,
          sizeof(state->base.shader_images));
   memset(state->base.shader_buffers, 0,
          sizeof(state->base.shader_buffers));
   memset(&state->base.framebuffer_state, 0,
          sizeof(state->base.framebuffer_state));

   memset(state->shaders, 0, sizeof(state->shaders));

   /* Redirect all pointer members into the snapshot's embedded storage. */
   state->base.render_cond.query = &state->render_cond;

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      state->base.shaders[i] = &state->shaders[i];
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         state->base.sampler_states[i][j] = &state->sampler_states[i][j];
   }

   state->base.velems = &state->velems;
   state->base.rs = &state->rs;
   state->base.dsa = &state->dsa;
   state->base.blend = &state->blend;
}
722
/* Release everything a draw-state snapshot holds: buffer/view/target
 * references, duplicated TGSI token arrays, and the framebuffer state. */
static void
dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   struct dd_draw_state *dst = &state->base;
   unsigned i,j;

   for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
      pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
   for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
      pipe_so_target_reference(&dst->so_targets[i], NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* Tokens were duplicated by dd_copy_draw_state via tgsi_dup_tokens. */
      if (dst->shaders[i])
         tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);

      for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
         pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
      for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
         pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
      for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
         pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
   }

   util_unreference_framebuffer_state(&dst->framebuffer_state);
}
750
751 static void
752 dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
753 {
754 unsigned i,j;
755
756 if (src->render_cond.query) {
757 *dst->render_cond.query = *src->render_cond.query;
758 dst->render_cond.condition = src->render_cond.condition;
759 dst->render_cond.mode = src->render_cond.mode;
760 } else {
761 dst->render_cond.query = NULL;
762 }
763
764 for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
765 pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
766 &src->vertex_buffers[i]);
767 }
768
769 dst->num_so_targets = src->num_so_targets;
770 for (i = 0; i < ARRAY_SIZE(src->so_targets); i++)
771 pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
772 memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
773
774 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
775 if (!src->shaders[i]) {
776 dst->shaders[i] = NULL;
777 continue;
778 }
779
780 if (src->shaders[i]) {
781 dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
782 if (src->shaders[i]->state.shader.tokens) {
783 dst->shaders[i]->state.shader.tokens =
784 tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
785 } else {
786 dst->shaders[i]->state.shader.ir.nir = NULL;
787 }
788 } else {
789 dst->shaders[i] = NULL;
790 }
791
792 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
793 pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
794 src->constant_buffers[i][j].buffer);
795 memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
796 sizeof(src->constant_buffers[i][j]));
797 }
798
799 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
800 pipe_sampler_view_reference(&dst->sampler_views[i][j],
801 src->sampler_views[i][j]);
802 if (src->sampler_states[i][j])
803 dst->sampler_states[i][j]->state.sampler =
804 src->sampler_states[i][j]->state.sampler;
805 else
806 dst->sampler_states[i][j] = NULL;
807 }
808
809 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
810 pipe_resource_reference(&dst->shader_images[i][j].resource,
811 src->shader_images[i][j].resource);
812 memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
813 sizeof(src->shader_images[i][j]));
814 }
815
816 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
817 pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
818 src->shader_buffers[i][j].buffer);
819 memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
820 sizeof(src->shader_buffers[i][j]));
821 }
822 }
823
824 if (src->velems)
825 dst->velems->state.velems = src->velems->state.velems;
826 else
827 dst->velems = NULL;
828
829 if (src->rs)
830 dst->rs->state.rs = src->rs->state.rs;
831 else
832 dst->rs = NULL;
833
834 if (src->dsa)
835 dst->dsa->state.dsa = src->dsa->state.dsa;
836 else
837 dst->dsa = NULL;
838
839 if (src->blend)
840 dst->blend->state.blend = src->blend->state.blend;
841 else
842 dst->blend = NULL;
843
844 dst->blend_color = src->blend_color;
845 dst->stencil_ref = src->stencil_ref;
846 dst->sample_mask = src->sample_mask;
847 dst->min_samples = src->min_samples;
848 dst->clip_state = src->clip_state;
849 util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
850 memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
851 memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
852 memcpy(dst->tess_default_levels, src->tess_default_levels,
853 sizeof(src->tess_default_levels));
854 dst->apitrace_call_number = src->apitrace_call_number;
855 }
856
/* Destroy a draw record: its log page, call/state snapshots, the three
 * pipeline fences, the driver-finished fence, and the record itself. */
static void
dd_free_record(struct pipe_screen *screen, struct dd_draw_record *record)
{
   u_log_page_destroy(record->log_page);
   dd_unreference_copy_of_call(&record->call);
   dd_unreference_copy_of_draw_state(&record->draw_state);
   screen->fence_reference(screen, &record->prev_bottom_of_pipe, NULL);
   screen->fence_reference(screen, &record->top_of_pipe, NULL);
   screen->fence_reference(screen, &record->bottom_of_pipe, NULL);
   util_queue_fence_destroy(&record->driver_finished);
   FREE(record);
}
869
/* Write one draw record to a dump file: timing info, the full call/state
 * dump, and the captured context log (if any). */
static void
dd_write_record(FILE *f, struct dd_draw_record *record)
{
   PRINT_NAMED(ptr, "pipe", record->dctx->pipe);
   PRINT_NAMED(ns, "time before (API call)", record->time_before);
   PRINT_NAMED(ns, "time after (driver done)", record->time_after);
   fprintf(f, "\n");

   dd_dump_call(f, &record->draw_state.base, &record->call);

   if (record->log_page) {
      fprintf(f,"\n\n**************************************************"
                "***************************\n");
      fprintf(f, "Context Log:\n\n");
      u_log_page_print(record->log_page, f);
   }
}
887
/* Dump a record to its own file if the dump mode asks for it: skipped in
 * hangs-only mode, and in apitrace mode skipped unless the record matches
 * the requested apitrace call number. */
static void
dd_maybe_dump_record(struct dd_screen *dscreen, struct dd_draw_record *record)
{
   if (dscreen->dump_mode == DD_DUMP_ONLY_HANGS ||
       (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
        dscreen->apitrace_dump_call != record->draw_state.base.apitrace_call_number))
      return;

   char name[512];
   dd_get_debug_filename_and_mkdir(name, sizeof(name), dscreen->verbose);
   FILE *f = fopen(name, "w");
   if (!f) {
      fprintf(stderr, "dd: failed to open %s\n", name);
      return;
   }

   dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
   dd_write_record(f, record);

   fclose(f);
}
909
910 static const char *
911 dd_fence_state(struct pipe_screen *screen, struct pipe_fence_handle *fence,
912 bool *not_reached)
913 {
914 if (!fence)
915 return "---";
916
917 bool ok = screen->fence_finish(screen, NULL, fence, 0);
918
919 if (not_reached && !ok)
920 *not_reached = true;
921
922 return ok ? "YES" : "NO ";
923 }
924
/* Walk the list of in-flight draw records after a hang was detected, print
 * a fence-status table to stderr, write a full dump file for each record
 * that may be involved in the hang, and finally abort the process.
 * Records whose bottom-of-pipe fence signalled before the first hanging
 * record are treated as completed; once a record's top-of-pipe fence was
 * not reached, later records get no per-record table rows (stop_output). */
static void
dd_report_hang(struct dd_context *dctx)
{
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_screen *screen = dscreen->screen;
   bool encountered_hang = false;
   bool stop_output = false;
   unsigned num_later = 0;

   fprintf(stderr, "GPU hang detected, collecting information...\n\n");

   fprintf(stderr, "Draw # driver prev BOP TOP BOP dump file\n"
                   "-------------------------------------------------------------\n");

   list_for_each_entry(struct dd_draw_record, record, &dctx->records, list) {
      /* Records that completed before the hang are handled normally. */
      if (!encountered_hang &&
          screen->fence_finish(screen, NULL, record->bottom_of_pipe, 0)) {
         dd_maybe_dump_record(dscreen, record);
         continue;
      }

      if (stop_output) {
         dd_maybe_dump_record(dscreen, record);
         num_later++;
         continue;
      }

      bool driver = util_queue_fence_is_signalled(&record->driver_finished);
      bool top_not_reached = false;
      const char *prev_bop = dd_fence_state(screen, record->prev_bottom_of_pipe, NULL);
      const char *top = dd_fence_state(screen, record->top_of_pipe, &top_not_reached);
      const char *bop = dd_fence_state(screen, record->bottom_of_pipe, NULL);

      fprintf(stderr, "%-9u %s %s %s %s ",
              record->draw_call, driver ? "YES" : "NO ", prev_bop, top, bop);

      char name[512];
      dd_get_debug_filename_and_mkdir(name, sizeof(name), false);

      FILE *f = fopen(name, "w");
      if (!f) {
         fprintf(stderr, "fopen failed\n");
      } else {
         fprintf(stderr, "%s\n", name);

         dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
         dd_write_record(f, record);

         /* Device status registers and dmesg only go into the first
          * hanging record's dump. */
         if (!encountered_hang) {
            dd_dump_driver_state(dctx, f, PIPE_DUMP_DEVICE_STATUS_REGISTERS);
            dd_dump_dmesg(f);
         }

         fclose(f);
      }

      if (top_not_reached)
         stop_output = true;
      encountered_hang = true;
   }

   if (num_later || dctx->record_pending) {
      fprintf(stderr, "... and %u%s additional draws.\n", num_later,
              dctx->record_pending ? "+1 (pending)" : "");
   }

   fprintf(stderr, "\nDone.\n");
   /* Never returns. */
   dd_kill_process();
}
994
/* Watchdog thread entry point. Repeatedly takes ownership of the queued
 * draw records (under dctx->mutex), waits for their fences with the
 * configured timeout, reports a hang if a fence times out, and otherwise
 * dumps and frees the completed records. Exits when kill_thread is set
 * and no work remains. The mutex is held everywhere except around the
 * fence waits and record processing. */
int
dd_thread_main(void *input)
{
   struct dd_context *dctx = (struct dd_context *)input;
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_screen *screen = dscreen->screen;

   mtx_lock(&dctx->mutex);

   for (;;) {
      struct list_head records;
      struct pipe_fence_handle *fence;
      struct pipe_fence_handle *fence2 = NULL;

      /* Steal the whole record list; the API thread starts a fresh one. */
      list_replace(&dctx->records, &records);
      list_inithead(&dctx->records);
      dctx->num_records = 0;

      /* Wake an API thread blocked on the record-count limit. */
      if (dctx->api_stalled)
         cnd_signal(&dctx->cond);

      if (!list_empty(&records)) {
         /* Wait for the youngest draw. This means hangs can take a bit longer
          * to detect, but it's more efficient this way. */
         struct dd_draw_record *youngest =
            LIST_ENTRY(struct dd_draw_record, records.prev, list);
         fence = youngest->bottom_of_pipe;
      } else if (dctx->record_pending) {
         /* Wait for pending fences, in case the driver ends up hanging internally. */
         fence = dctx->record_pending->prev_bottom_of_pipe;
         fence2 = dctx->record_pending->top_of_pipe;
      } else if (dctx->kill_thread) {
         break;
      } else {
         cnd_wait(&dctx->cond, &dctx->mutex);
         continue;
      }
      mtx_unlock(&dctx->mutex);

      /* Fences can be NULL legitimately when timeout detection is disabled. */
      if ((fence &&
           !screen->fence_finish(screen, NULL, fence,
                                 dscreen->timeout_ms * 1000*1000)) ||
          (fence2 &&
           !screen->fence_finish(screen, NULL, fence2,
                                 dscreen->timeout_ms * 1000*1000))) {
         mtx_lock(&dctx->mutex);
         /* Put the records back so dd_report_hang can see all of them. */
         list_splice(&records, &dctx->records);
         dd_report_hang(dctx);
         /* we won't actually get here */
         mtx_unlock(&dctx->mutex);
      }

      list_for_each_entry_safe(struct dd_draw_record, record, &records, list) {
         dd_maybe_dump_record(dscreen, record);
         list_del(&record->list);
         dd_free_record(screen, record);
      }

      mtx_lock(&dctx->mutex);
   }
   mtx_unlock(&dctx->mutex);
   return 0;
}
1059
1060 static struct dd_draw_record *
1061 dd_create_record(struct dd_context *dctx)
1062 {
1063 struct dd_draw_record *record;
1064
1065 record = MALLOC_STRUCT(dd_draw_record);
1066 if (!record)
1067 return NULL;
1068
1069 record->dctx = dctx;
1070 record->draw_call = dctx->num_draw_calls;
1071
1072 record->prev_bottom_of_pipe = NULL;
1073 record->top_of_pipe = NULL;
1074 record->bottom_of_pipe = NULL;
1075 record->log_page = NULL;
1076 util_queue_fence_init(&record->driver_finished);
1077
1078 dd_init_copy_of_draw_state(&record->draw_state);
1079 dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
1080
1081 return record;
1082 }
1083
1084 static void
1085 dd_context_flush(struct pipe_context *_pipe,
1086 struct pipe_fence_handle **fence, unsigned flags)
1087 {
1088 struct dd_context *dctx = dd_context(_pipe);
1089 struct pipe_context *pipe = dctx->pipe;
1090
1091 pipe->flush(pipe, fence, flags);
1092 }
1093
/* Called right before each recorded call: timestamp the record and, when
 * hang detection is enabled (timeout_ms > 0), create the fences the
 * detection thread waits on and publish the record as "pending".
 */
static void
dd_before_draw(struct dd_context *dctx, struct dd_draw_record *record)
{
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_context *pipe = dctx->pipe;
   struct pipe_screen *screen = dscreen->screen;

   record->time_before = os_time_get_nano();

   if (dscreen->timeout_ms > 0) {
      if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count) {
         /* Full flush: the single real fence serves as both the previous
          * bottom-of-pipe and this call's top-of-pipe marker. */
         pipe->flush(pipe, &record->prev_bottom_of_pipe, 0);
         screen->fence_reference(screen, &record->top_of_pipe, record->prev_bottom_of_pipe);
      } else {
         /* Cheap deferred fences bracketing the call. */
         pipe->flush(pipe, &record->prev_bottom_of_pipe,
                     PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE);
         pipe->flush(pipe, &record->top_of_pipe,
                     PIPE_FLUSH_DEFERRED | PIPE_FLUSH_TOP_OF_PIPE);
      }

      /* Expose the record to the detection thread; wake it if it has no
       * queued records to wait on. */
      mtx_lock(&dctx->mutex);
      dctx->record_pending = record;
      if (list_empty(&dctx->records))
         cnd_signal(&dctx->cond);
      mtx_unlock(&dctx->mutex);
   }
}
1121
/* Finish a record once the driver has processed the call: capture the
 * driver's log page, timestamp the record, and signal driver_finished.
 * Runs either through pipe->callback or synchronously from dd_after_draw
 * when no callback mechanism exists.
 */
static void
dd_after_draw_async(void *data)
{
   struct dd_draw_record *record = (struct dd_draw_record *)data;
   struct dd_context *dctx = record->dctx;
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);

   record->log_page = u_log_new_page(&dctx->log);
   record->time_after = os_time_get_nano();

   /* Only signal if dd_after_draw reset the fence (callback path); in the
    * synchronous path the fence was never reset. */
   if (!util_queue_fence_is_signalled(&record->driver_finished))
      util_queue_fence_signal(&record->driver_finished);

   /* In apitrace-call dump mode, exit once past the requested call number. */
   if (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
       dscreen->apitrace_dump_call > dctx->draw_state.apitrace_call_number) {
      dd_thread_join(dctx);
      /* No need to continue. */
      exit(0);
   }
}
1142
/* Called right after each recorded call: fence the bottom of the pipe for
 * hang detection, hand the record to dd_after_draw_async (via the driver
 * callback when available), then queue the record for the detection thread.
 */
static void
dd_after_draw(struct dd_context *dctx, struct dd_draw_record *record)
{
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_context *pipe = dctx->pipe;

   if (dscreen->timeout_ms > 0) {
      unsigned flush_flags;
      if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count)
         flush_flags = 0;
      else
         flush_flags = PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE;
      pipe->flush(pipe, &record->bottom_of_pipe, flush_flags);

      assert(record == dctx->record_pending);
   }

   if (pipe->callback) {
      /* Reset so dd_after_draw_async (and its waiters) can see when the
       * driver has actually processed the call. */
      util_queue_fence_reset(&record->driver_finished);
      pipe->callback(pipe, dd_after_draw_async, record, true);
   } else {
      dd_after_draw_async(record);
   }

   mtx_lock(&dctx->mutex);
   if (unlikely(dctx->num_records > 10000)) {
      /* Throttle the API thread when the detection thread falls behind. */
      dctx->api_stalled = true;
      /* Since this is only a heuristic to prevent the API thread from getting
       * too far ahead, we don't need a loop here. */
      cnd_wait(&dctx->cond, &dctx->mutex);
      dctx->api_stalled = false;
   }

   /* Wake the detection thread if it had nothing queued to wait on. */
   if (list_empty(&dctx->records))
      cnd_signal(&dctx->cond);

   /* The record is now complete: move it from "pending" to the queue. */
   list_addtail(&record->list, &dctx->records);
   dctx->record_pending = NULL;
   dctx->num_records++;
   mtx_unlock(&dctx->mutex);

   ++dctx->num_draw_calls;
   if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
      fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
              dctx->num_draw_calls);
}
1189
1190 static void
1191 dd_context_draw_vbo(struct pipe_context *_pipe,
1192 const struct pipe_draw_info *info)
1193 {
1194 struct dd_context *dctx = dd_context(_pipe);
1195 struct pipe_context *pipe = dctx->pipe;
1196 struct dd_draw_record *record = dd_create_record(dctx);
1197
1198 record->call.type = CALL_DRAW_VBO;
1199 record->call.info.draw_vbo.draw = *info;
1200 record->call.info.draw_vbo.draw.count_from_stream_output = NULL;
1201 pipe_so_target_reference(&record->call.info.draw_vbo.draw.count_from_stream_output,
1202 info->count_from_stream_output);
1203 if (info->index_size && !info->has_user_indices) {
1204 record->call.info.draw_vbo.draw.index.resource = NULL;
1205 pipe_resource_reference(&record->call.info.draw_vbo.draw.index.resource,
1206 info->index.resource);
1207 }
1208
1209 if (info->indirect) {
1210 record->call.info.draw_vbo.indirect = *info->indirect;
1211 record->call.info.draw_vbo.draw.indirect = &record->call.info.draw_vbo.indirect;
1212
1213 record->call.info.draw_vbo.indirect.buffer = NULL;
1214 pipe_resource_reference(&record->call.info.draw_vbo.indirect.buffer,
1215 info->indirect->buffer);
1216 record->call.info.draw_vbo.indirect.indirect_draw_count = NULL;
1217 pipe_resource_reference(&record->call.info.draw_vbo.indirect.indirect_draw_count,
1218 info->indirect->indirect_draw_count);
1219 } else {
1220 memset(&record->call.info.draw_vbo.indirect, 0, sizeof(*info->indirect));
1221 }
1222
1223 dd_before_draw(dctx, record);
1224 pipe->draw_vbo(pipe, info);
1225 dd_after_draw(dctx, record);
1226 }
1227
1228 static void
1229 dd_context_launch_grid(struct pipe_context *_pipe,
1230 const struct pipe_grid_info *info)
1231 {
1232 struct dd_context *dctx = dd_context(_pipe);
1233 struct pipe_context *pipe = dctx->pipe;
1234 struct dd_draw_record *record = dd_create_record(dctx);
1235
1236 record->call.type = CALL_LAUNCH_GRID;
1237 record->call.info.launch_grid = *info;
1238 record->call.info.launch_grid.indirect = NULL;
1239 pipe_resource_reference(&record->call.info.launch_grid.indirect, info->indirect);
1240
1241 dd_before_draw(dctx, record);
1242 pipe->launch_grid(pipe, info);
1243 dd_after_draw(dctx, record);
1244 }
1245
1246 static void
1247 dd_context_resource_copy_region(struct pipe_context *_pipe,
1248 struct pipe_resource *dst, unsigned dst_level,
1249 unsigned dstx, unsigned dsty, unsigned dstz,
1250 struct pipe_resource *src, unsigned src_level,
1251 const struct pipe_box *src_box)
1252 {
1253 struct dd_context *dctx = dd_context(_pipe);
1254 struct pipe_context *pipe = dctx->pipe;
1255 struct dd_draw_record *record = dd_create_record(dctx);
1256
1257 record->call.type = CALL_RESOURCE_COPY_REGION;
1258 record->call.info.resource_copy_region.dst = NULL;
1259 pipe_resource_reference(&record->call.info.resource_copy_region.dst, dst);
1260 record->call.info.resource_copy_region.dst_level = dst_level;
1261 record->call.info.resource_copy_region.dstx = dstx;
1262 record->call.info.resource_copy_region.dsty = dsty;
1263 record->call.info.resource_copy_region.dstz = dstz;
1264 record->call.info.resource_copy_region.src = NULL;
1265 pipe_resource_reference(&record->call.info.resource_copy_region.src, src);
1266 record->call.info.resource_copy_region.src_level = src_level;
1267 record->call.info.resource_copy_region.src_box = *src_box;
1268
1269 dd_before_draw(dctx, record);
1270 pipe->resource_copy_region(pipe,
1271 dst, dst_level, dstx, dsty, dstz,
1272 src, src_level, src_box);
1273 dd_after_draw(dctx, record);
1274 }
1275
1276 static void
1277 dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1278 {
1279 struct dd_context *dctx = dd_context(_pipe);
1280 struct pipe_context *pipe = dctx->pipe;
1281 struct dd_draw_record *record = dd_create_record(dctx);
1282
1283 record->call.type = CALL_BLIT;
1284 record->call.info.blit = *info;
1285 record->call.info.blit.dst.resource = NULL;
1286 pipe_resource_reference(&record->call.info.blit.dst.resource, info->dst.resource);
1287 record->call.info.blit.src.resource = NULL;
1288 pipe_resource_reference(&record->call.info.blit.src.resource, info->src.resource);
1289
1290 dd_before_draw(dctx, record);
1291 pipe->blit(pipe, info);
1292 dd_after_draw(dctx, record);
1293 }
1294
1295 static boolean
1296 dd_context_generate_mipmap(struct pipe_context *_pipe,
1297 struct pipe_resource *res,
1298 enum pipe_format format,
1299 unsigned base_level,
1300 unsigned last_level,
1301 unsigned first_layer,
1302 unsigned last_layer)
1303 {
1304 struct dd_context *dctx = dd_context(_pipe);
1305 struct pipe_context *pipe = dctx->pipe;
1306 struct dd_draw_record *record = dd_create_record(dctx);
1307 boolean result;
1308
1309 record->call.type = CALL_GENERATE_MIPMAP;
1310 record->call.info.generate_mipmap.res = NULL;
1311 pipe_resource_reference(&record->call.info.generate_mipmap.res, res);
1312 record->call.info.generate_mipmap.format = format;
1313 record->call.info.generate_mipmap.base_level = base_level;
1314 record->call.info.generate_mipmap.last_level = last_level;
1315 record->call.info.generate_mipmap.first_layer = first_layer;
1316 record->call.info.generate_mipmap.last_layer = last_layer;
1317
1318 dd_before_draw(dctx, record);
1319 result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1320 first_layer, last_layer);
1321 dd_after_draw(dctx, record);
1322 return result;
1323 }
1324
1325 static void
1326 dd_context_get_query_result_resource(struct pipe_context *_pipe,
1327 struct pipe_query *query,
1328 boolean wait,
1329 enum pipe_query_value_type result_type,
1330 int index,
1331 struct pipe_resource *resource,
1332 unsigned offset)
1333 {
1334 struct dd_context *dctx = dd_context(_pipe);
1335 struct dd_query *dquery = dd_query(query);
1336 struct pipe_context *pipe = dctx->pipe;
1337 struct dd_draw_record *record = dd_create_record(dctx);
1338
1339 record->call.type = CALL_GET_QUERY_RESULT_RESOURCE;
1340 record->call.info.get_query_result_resource.query = query;
1341 record->call.info.get_query_result_resource.wait = wait;
1342 record->call.info.get_query_result_resource.result_type = result_type;
1343 record->call.info.get_query_result_resource.index = index;
1344 record->call.info.get_query_result_resource.resource = NULL;
1345 pipe_resource_reference(&record->call.info.get_query_result_resource.resource,
1346 resource);
1347 record->call.info.get_query_result_resource.offset = offset;
1348
1349 /* The query may be deleted by the time we need to print it. */
1350 record->call.info.get_query_result_resource.query_type = dquery->type;
1351
1352 dd_before_draw(dctx, record);
1353 pipe->get_query_result_resource(pipe, dquery->query, wait,
1354 result_type, index, resource, offset);
1355 dd_after_draw(dctx, record);
1356 }
1357
1358 static void
1359 dd_context_flush_resource(struct pipe_context *_pipe,
1360 struct pipe_resource *resource)
1361 {
1362 struct dd_context *dctx = dd_context(_pipe);
1363 struct pipe_context *pipe = dctx->pipe;
1364 struct dd_draw_record *record = dd_create_record(dctx);
1365
1366 record->call.type = CALL_FLUSH_RESOURCE;
1367 record->call.info.flush_resource = NULL;
1368 pipe_resource_reference(&record->call.info.flush_resource, resource);
1369
1370 dd_before_draw(dctx, record);
1371 pipe->flush_resource(pipe, resource);
1372 dd_after_draw(dctx, record);
1373 }
1374
1375 static void
1376 dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
1377 const union pipe_color_union *color, double depth,
1378 unsigned stencil)
1379 {
1380 struct dd_context *dctx = dd_context(_pipe);
1381 struct pipe_context *pipe = dctx->pipe;
1382 struct dd_draw_record *record = dd_create_record(dctx);
1383
1384 record->call.type = CALL_CLEAR;
1385 record->call.info.clear.buffers = buffers;
1386 record->call.info.clear.color = *color;
1387 record->call.info.clear.depth = depth;
1388 record->call.info.clear.stencil = stencil;
1389
1390 dd_before_draw(dctx, record);
1391 pipe->clear(pipe, buffers, color, depth, stencil);
1392 dd_after_draw(dctx, record);
1393 }
1394
1395 static void
1396 dd_context_clear_render_target(struct pipe_context *_pipe,
1397 struct pipe_surface *dst,
1398 const union pipe_color_union *color,
1399 unsigned dstx, unsigned dsty,
1400 unsigned width, unsigned height,
1401 bool render_condition_enabled)
1402 {
1403 struct dd_context *dctx = dd_context(_pipe);
1404 struct pipe_context *pipe = dctx->pipe;
1405 struct dd_draw_record *record = dd_create_record(dctx);
1406
1407 record->call.type = CALL_CLEAR_RENDER_TARGET;
1408
1409 dd_before_draw(dctx, record);
1410 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
1411 render_condition_enabled);
1412 dd_after_draw(dctx, record);
1413 }
1414
1415 static void
1416 dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1417 struct pipe_surface *dst, unsigned clear_flags,
1418 double depth, unsigned stencil, unsigned dstx,
1419 unsigned dsty, unsigned width, unsigned height,
1420 bool render_condition_enabled)
1421 {
1422 struct dd_context *dctx = dd_context(_pipe);
1423 struct pipe_context *pipe = dctx->pipe;
1424 struct dd_draw_record *record = dd_create_record(dctx);
1425
1426 record->call.type = CALL_CLEAR_DEPTH_STENCIL;
1427
1428 dd_before_draw(dctx, record);
1429 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1430 dstx, dsty, width, height,
1431 render_condition_enabled);
1432 dd_after_draw(dctx, record);
1433 }
1434
1435 static void
1436 dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1437 unsigned offset, unsigned size,
1438 const void *clear_value, int clear_value_size)
1439 {
1440 struct dd_context *dctx = dd_context(_pipe);
1441 struct pipe_context *pipe = dctx->pipe;
1442 struct dd_draw_record *record = dd_create_record(dctx);
1443
1444 record->call.type = CALL_CLEAR_BUFFER;
1445 record->call.info.clear_buffer.res = NULL;
1446 pipe_resource_reference(&record->call.info.clear_buffer.res, res);
1447 record->call.info.clear_buffer.offset = offset;
1448 record->call.info.clear_buffer.size = size;
1449 record->call.info.clear_buffer.clear_value = clear_value;
1450 record->call.info.clear_buffer.clear_value_size = clear_value_size;
1451
1452 dd_before_draw(dctx, record);
1453 pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1454 dd_after_draw(dctx, record);
1455 }
1456
1457 static void
1458 dd_context_clear_texture(struct pipe_context *_pipe,
1459 struct pipe_resource *res,
1460 unsigned level,
1461 const struct pipe_box *box,
1462 const void *data)
1463 {
1464 struct dd_context *dctx = dd_context(_pipe);
1465 struct pipe_context *pipe = dctx->pipe;
1466 struct dd_draw_record *record = dd_create_record(dctx);
1467
1468 record->call.type = CALL_CLEAR_TEXTURE;
1469
1470 dd_before_draw(dctx, record);
1471 pipe->clear_texture(pipe, res, level, box, data);
1472 dd_after_draw(dctx, record);
1473 }
1474
1475 /********************************************************************
1476 * transfer
1477 */
1478
1479 static void *
1480 dd_context_transfer_map(struct pipe_context *_pipe,
1481 struct pipe_resource *resource, unsigned level,
1482 unsigned usage, const struct pipe_box *box,
1483 struct pipe_transfer **transfer)
1484 {
1485 struct dd_context *dctx = dd_context(_pipe);
1486 struct pipe_context *pipe = dctx->pipe;
1487 struct dd_draw_record *record =
1488 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1489
1490 if (record) {
1491 record->call.type = CALL_TRANSFER_MAP;
1492
1493 dd_before_draw(dctx, record);
1494 }
1495 void *ptr = pipe->transfer_map(pipe, resource, level, usage, box, transfer);
1496 if (record) {
1497 record->call.info.transfer_map.transfer_ptr = *transfer;
1498 record->call.info.transfer_map.ptr = ptr;
1499 if (*transfer) {
1500 record->call.info.transfer_map.transfer = **transfer;
1501 record->call.info.transfer_map.transfer.resource = NULL;
1502 pipe_resource_reference(&record->call.info.transfer_map.transfer.resource,
1503 (*transfer)->resource);
1504 } else {
1505 memset(&record->call.info.transfer_map.transfer, 0, sizeof(struct pipe_transfer));
1506 }
1507
1508 dd_after_draw(dctx, record);
1509 }
1510 return ptr;
1511 }
1512
1513 static void
1514 dd_context_transfer_flush_region(struct pipe_context *_pipe,
1515 struct pipe_transfer *transfer,
1516 const struct pipe_box *box)
1517 {
1518 struct dd_context *dctx = dd_context(_pipe);
1519 struct pipe_context *pipe = dctx->pipe;
1520 struct dd_draw_record *record =
1521 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1522
1523 if (record) {
1524 record->call.type = CALL_TRANSFER_FLUSH_REGION;
1525 record->call.info.transfer_flush_region.transfer_ptr = transfer;
1526 record->call.info.transfer_flush_region.box = *box;
1527 record->call.info.transfer_flush_region.transfer = *transfer;
1528 record->call.info.transfer_flush_region.transfer.resource = NULL;
1529 pipe_resource_reference(
1530 &record->call.info.transfer_flush_region.transfer.resource,
1531 transfer->resource);
1532
1533 dd_before_draw(dctx, record);
1534 }
1535 pipe->transfer_flush_region(pipe, transfer, box);
1536 if (record)
1537 dd_after_draw(dctx, record);
1538 }
1539
1540 static void
1541 dd_context_transfer_unmap(struct pipe_context *_pipe,
1542 struct pipe_transfer *transfer)
1543 {
1544 struct dd_context *dctx = dd_context(_pipe);
1545 struct pipe_context *pipe = dctx->pipe;
1546 struct dd_draw_record *record =
1547 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1548
1549 if (record) {
1550 record->call.type = CALL_TRANSFER_UNMAP;
1551 record->call.info.transfer_unmap.transfer_ptr = transfer;
1552 record->call.info.transfer_unmap.transfer = *transfer;
1553 record->call.info.transfer_unmap.transfer.resource = NULL;
1554 pipe_resource_reference(
1555 &record->call.info.transfer_unmap.transfer.resource,
1556 transfer->resource);
1557
1558 dd_before_draw(dctx, record);
1559 }
1560 pipe->transfer_unmap(pipe, transfer);
1561 if (record)
1562 dd_after_draw(dctx, record);
1563 }
1564
1565 static void
1566 dd_context_buffer_subdata(struct pipe_context *_pipe,
1567 struct pipe_resource *resource,
1568 unsigned usage, unsigned offset,
1569 unsigned size, const void *data)
1570 {
1571 struct dd_context *dctx = dd_context(_pipe);
1572 struct pipe_context *pipe = dctx->pipe;
1573 struct dd_draw_record *record =
1574 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1575
1576 if (record) {
1577 record->call.type = CALL_BUFFER_SUBDATA;
1578 record->call.info.buffer_subdata.resource = NULL;
1579 pipe_resource_reference(&record->call.info.buffer_subdata.resource, resource);
1580 record->call.info.buffer_subdata.usage = usage;
1581 record->call.info.buffer_subdata.offset = offset;
1582 record->call.info.buffer_subdata.size = size;
1583 record->call.info.buffer_subdata.data = data;
1584
1585 dd_before_draw(dctx, record);
1586 }
1587 pipe->buffer_subdata(pipe, resource, usage, offset, size, data);
1588 if (record)
1589 dd_after_draw(dctx, record);
1590 }
1591
1592 static void
1593 dd_context_texture_subdata(struct pipe_context *_pipe,
1594 struct pipe_resource *resource,
1595 unsigned level, unsigned usage,
1596 const struct pipe_box *box,
1597 const void *data, unsigned stride,
1598 unsigned layer_stride)
1599 {
1600 struct dd_context *dctx = dd_context(_pipe);
1601 struct pipe_context *pipe = dctx->pipe;
1602 struct dd_draw_record *record =
1603 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1604
1605 if (record) {
1606 record->call.type = CALL_TEXTURE_SUBDATA;
1607 record->call.info.texture_subdata.resource = NULL;
1608 pipe_resource_reference(&record->call.info.texture_subdata.resource, resource);
1609 record->call.info.texture_subdata.level = level;
1610 record->call.info.texture_subdata.usage = usage;
1611 record->call.info.texture_subdata.box = *box;
1612 record->call.info.texture_subdata.data = data;
1613 record->call.info.texture_subdata.stride = stride;
1614 record->call.info.texture_subdata.layer_stride = layer_stride;
1615
1616 dd_before_draw(dctx, record);
1617 }
1618 pipe->texture_subdata(pipe, resource, level, usage, box, data,
1619 stride, layer_stride);
1620 if (record)
1621 dd_after_draw(dctx, record);
1622 }
1623
/* Install the recording wrappers above into the public context vtable.
 * NOTE(review): CTX_INIT presumably expands to
 * dctx->base.<name> = dd_context_<name> — confirm against dd_pipe.h. */
void
dd_init_draw_functions(struct dd_context *dctx)
{
   CTX_INIT(flush);
   CTX_INIT(draw_vbo);
   CTX_INIT(launch_grid);
   CTX_INIT(resource_copy_region);
   CTX_INIT(blit);
   CTX_INIT(clear);
   CTX_INIT(clear_render_target);
   CTX_INIT(clear_depth_stencil);
   CTX_INIT(clear_buffer);
   CTX_INIT(clear_texture);
   CTX_INIT(flush_resource);
   CTX_INIT(generate_mipmap);
   CTX_INIT(get_query_result_resource);
   CTX_INIT(transfer_map);
   CTX_INIT(transfer_flush_region);
   CTX_INIT(transfer_unmap);
   CTX_INIT(buffer_subdata);
   CTX_INIT(texture_subdata);
}