/**************************************************************************
 *
 * Copyright 2015 Advanced Micro Devices, Inc.
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "dd_pipe.h"

#include "util/u_dump.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "util/os_time.h"
#include <inttypes.h>
#include "pipe/p_config.h"


static void
dd_write_header(FILE *f, struct pipe_screen *screen, unsigned apitrace_call_number)
{
   char cmd_line[4096];
   if (os_get_command_line(cmd_line, sizeof(cmd_line)))
      fprintf(f, "Command: %s\n", cmd_line);
   fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
   fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
   fprintf(f, "Device name: %s\n\n", screen->get_name(screen));

   if (apitrace_call_number)
      fprintf(f, "Last apitrace call: %u\n\n", apitrace_call_number);
}

FILE *
dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
{
   struct pipe_screen *screen = dscreen->screen;

   FILE *f = dd_get_debug_file(dscreen->verbose);
   if (!f)
      return NULL;

   dd_write_header(f, screen, apitrace_call_number);
   return f;
}

static void
dd_dump_dmesg(FILE *f)
{
#ifdef PIPE_OS_LINUX
   char line[2000];
   FILE *p = popen("dmesg | tail -n60", "r");

   if (!p)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   while (fgets(line, sizeof(line), p))
      fputs(line, f);

   pclose(p);
#endif
}

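/* Return how many viewports the bound shaders can address: scan the last
 * pre-rasterizer stage (GS, then TES, then VS) and report PIPE_MAX_VIEWPORTS
 * only if it writes the viewport index, otherwise 1.
 */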
static unsigned
dd_num_active_viewports(struct dd_draw_state *dstate)
{
   struct tgsi_shader_info info;
   const struct tgsi_token *tokens;

   if (dstate->shaders[PIPE_SHADER_GEOMETRY])
      tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
      tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_VERTEX])
      tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
   else
      return 1;

   if (tokens) {
      tgsi_scan_shader(tokens, &info);
      if (info.writes_viewport_index)
         return PIPE_MAX_VIEWPORTS;
   }

   return 1;
}

#define COLOR_RESET  "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE  "\033[1;33m"

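/* Dump helpers: each DUMP* macro prints one piece of state via the matching
 * util_dump_<name>() function. The _I variants add an index, and the _M
 * variants print a named struct member (by value or by address).
 */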
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

#define DUMP_M(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)

#define PRINT_NAMED(type, name, value) \
   do { \
      fprintf(f, COLOR_STATE "%s" COLOR_RESET " = ", name); \
      util_dump_##type(f, value); \
      fprintf(f, "\n"); \
   } while (0)

static void
util_dump_uint(FILE *f, unsigned i)
{
   fprintf(f, "%u", i);
}

static void
util_dump_int(FILE *f, int i)
{
   fprintf(f, "%d", i);
}

static void
util_dump_hex(FILE *f, unsigned i)
{
   fprintf(f, "0x%x", i);
}

static void
util_dump_double(FILE *f, double d)
{
   fprintf(f, "%f", d);
}

static void
util_dump_format(FILE *f, enum pipe_format format)
{
   fprintf(f, "%s", util_format_name(format));
}

static void
util_dump_color_union(FILE *f, const union pipe_color_union *color)
{
   fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}}",
           color->f[0], color->f[1], color->f[2], color->f[3],
           color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
}

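/* PRINT_NAMED(ns, ...) in dd_write_record() below expands to util_dump_ns().
 * Assuming no definition is provided elsewhere, a minimal helper printing a
 * nanosecond timestamp (this is why <inttypes.h> is included) would be:
 */
static void
util_dump_ns(FILE *f, int64_t time)
{
   fprintf(f, "%"PRIi64" ns", time);
}
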
static void
dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
{
   if (dstate->render_cond.query) {
      fprintf(f, "render condition:\n");
      DUMP_M(query_type, &dstate->render_cond, query->type);
      DUMP_M(uint, &dstate->render_cond, condition);
      DUMP_M(uint, &dstate->render_cond, mode);
      fprintf(f, "\n");
   }
}

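/* Dump everything bound to one shader stage: the shader itself plus its
 * constant buffers, sampler states, sampler views, images and shader buffers.
 * Rasterizer, viewport and scissor state is dumped alongside the fragment
 * shader, and tess default levels alongside a missing TCS.
 */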
static void
dd_dump_shader(struct dd_draw_state *dstate, enum pipe_shader_type sh, FILE *f)
{
   int i;
   const char *shader_str[PIPE_SHADER_TYPES];

   shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
   shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
   shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
   shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
   shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
   shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";

   if (sh == PIPE_SHADER_TESS_CTRL &&
       !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
       dstate->shaders[PIPE_SHADER_TESS_EVAL])
      fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
              "default_inner_level = {%f, %f}}\n",
              dstate->tess_default_levels[0],
              dstate->tess_default_levels[1],
              dstate->tess_default_levels[2],
              dstate->tess_default_levels[3],
              dstate->tess_default_levels[4],
              dstate->tess_default_levels[5]);

   if (sh == PIPE_SHADER_FRAGMENT)
      if (dstate->rs) {
         unsigned num_viewports = dd_num_active_viewports(dstate);

         if (dstate->rs->state.rs.clip_plane_enable)
            DUMP(clip_state, &dstate->clip_state);

         for (i = 0; i < num_viewports; i++)
            DUMP_I(viewport_state, &dstate->viewports[i], i);

         if (dstate->rs->state.rs.scissor)
            for (i = 0; i < num_viewports; i++)
               DUMP_I(scissor_state, &dstate->scissors[i], i);

         DUMP(rasterizer_state, &dstate->rs->state.rs);

         if (dstate->rs->state.rs.poly_stipple_enable)
            DUMP(poly_stipple, &dstate->polygon_stipple);
         fprintf(f, "\n");
      }

   if (!dstate->shaders[sh])
      return;

   fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
   DUMP(shader_state, &dstate->shaders[sh]->state.shader);

   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
      if (dstate->constant_buffers[sh][i].buffer ||
          dstate->constant_buffers[sh][i].user_buffer) {
         DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
         if (dstate->constant_buffers[sh][i].buffer)
            DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
      }

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
      if (dstate->sampler_states[sh][i])
         DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
      if (dstate->sampler_views[sh][i]) {
         DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
         DUMP_M(resource, dstate->sampler_views[sh][i], texture);
      }

   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
      if (dstate->shader_images[sh][i].resource) {
         DUMP_I(image_view, &dstate->shader_images[sh][i], i);
         if (dstate->shader_images[sh][i].resource)
            DUMP_M(resource, &dstate->shader_images[sh][i], resource);
      }

   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
      if (dstate->shader_buffers[sh][i].buffer) {
         DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
         if (dstate->shader_buffers[sh][i].buffer)
            DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
      }

   fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
}

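/* Dump the whole recorded state for a draw_vbo call: draw info, vertex
 * buffers, vertex elements, stream output, every non-compute shader stage,
 * and the fixed-function (DSA, blend, framebuffer) state.
 */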
static void
dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
{
   int sh, i;

   DUMP(draw_info, info);
   if (info->count_from_stream_output)
      DUMP_M(stream_output_target, info,
             count_from_stream_output);
   if (info->indirect) {
      DUMP_M(resource, info, indirect->buffer);
      if (info->indirect->indirect_draw_count)
         DUMP_M(resource, info, indirect->indirect_draw_count);
   }

   fprintf(f, "\n");

   /* TODO: dump active queries */

   dd_dump_render_condition(dstate, f);

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      if (dstate->vertex_buffers[i].buffer.resource) {
         DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
         if (!dstate->vertex_buffers[i].is_user_buffer)
            DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
      }

   if (dstate->velems) {
      PRINT_NAMED(uint, "num vertex elements",
                  dstate->velems->state.velems.count);
      for (i = 0; i < dstate->velems->state.velems.count; i++) {
         fprintf(f, " ");
         DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
      }
   }

   PRINT_NAMED(uint, "num stream output targets", dstate->num_so_targets);
   for (i = 0; i < dstate->num_so_targets; i++)
      if (dstate->so_targets[i]) {
         DUMP_I(stream_output_target, dstate->so_targets[i], i);
         DUMP_M(resource, dstate->so_targets[i], buffer);
         fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
      }

   fprintf(f, "\n");
   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (sh == PIPE_SHADER_COMPUTE)
         continue;

      dd_dump_shader(dstate, sh, f);
   }

   if (dstate->dsa)
      DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
   DUMP(stencil_ref, &dstate->stencil_ref);

   if (dstate->blend)
      DUMP(blend_state, &dstate->blend->state.blend);
   DUMP(blend_color, &dstate->blend_color);

   PRINT_NAMED(uint, "min_samples", dstate->min_samples);
   PRINT_NAMED(hex, "sample_mask", dstate->sample_mask);
   fprintf(f, "\n");

   DUMP(framebuffer_state, &dstate->framebuffer_state);
   for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
      if (dstate->framebuffer_state.cbufs[i]) {
         fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
         DUMP(surface, dstate->framebuffer_state.cbufs[i]);
         fprintf(f, " ");
         DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
      }
   if (dstate->framebuffer_state.zsbuf) {
      fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
      DUMP(surface, dstate->framebuffer_state.zsbuf);
      fprintf(f, " ");
      DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
   }
   fprintf(f, "\n");
}

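/* Per-call dumpers for the remaining recorded call types. Each one prints its
 * own name (skipping the "dd_dump_" prefix via __func__+8) followed by the
 * call parameters.
 */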
static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(grid_info, info);
   fprintf(f, "\n");

   dd_dump_shader(dstate, PIPE_SHADER_COMPUTE, f);
   fprintf(f, "\n");
}

static void
dd_dump_resource_copy_region(struct dd_draw_state *dstate,
                             struct call_resource_copy_region *info,
                             FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst);
   DUMP_M(uint, info, dst_level);
   DUMP_M(uint, info, dstx);
   DUMP_M(uint, info, dsty);
   DUMP_M(uint, info, dstz);
   DUMP_M(resource, info, src);
   DUMP_M(uint, info, src_level);
   DUMP_M_ADDR(box, info, src_box);
}

static void
dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst.resource);
   DUMP_M(uint, info, dst.level);
   DUMP_M_ADDR(box, info, dst.box);
   DUMP_M(format, info, dst.format);

   DUMP_M(resource, info, src.resource);
   DUMP_M(uint, info, src.level);
   DUMP_M_ADDR(box, info, src.box);
   DUMP_M(format, info, src.format);

   DUMP_M(hex, info, mask);
   DUMP_M(uint, info, filter);
   DUMP_M(uint, info, scissor_enable);
   DUMP_M_ADDR(scissor_state, info, scissor);
   DUMP_M(uint, info, render_condition_enable);

   if (info->render_condition_enable)
      dd_dump_render_condition(dstate, f);
}

static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}

static void
dd_dump_get_query_result_resource(struct call_get_query_result_resource *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__ + 8);
   DUMP_M(query_type, info, query_type);
   DUMP_M(uint, info, wait);
   DUMP_M(query_value_type, info, result_type);
   DUMP_M(int, info, index);
   DUMP_M(resource, info, resource);
   DUMP_M(uint, info, offset);
}

static void
dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
                       FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(resource, res);
}

static void
dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(uint, info, buffers);
   DUMP_M_ADDR(color_union, info, color);
   DUMP_M(double, info, depth);
   DUMP_M(hex, info, stencil);
}

static void
dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
                     FILE *f)
{
   int i;
   const char *value = (const char*)info->clear_value;

   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, res);
   DUMP_M(uint, info, offset);
   DUMP_M(uint, info, size);
   DUMP_M(uint, info, clear_value_size);

   fprintf(f, " clear_value:");
   for (i = 0; i < info->clear_value_size; i++)
      fprintf(f, " %02x", value[i]);
   fprintf(f, "\n");
}

static void
dd_dump_transfer_map(struct call_transfer_map *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M_ADDR(transfer, info, transfer);
   DUMP_M(ptr, info, transfer_ptr);
   DUMP_M(ptr, info, ptr);
}

static void
dd_dump_transfer_flush_region(struct call_transfer_flush_region *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M_ADDR(transfer, info, transfer);
   DUMP_M(ptr, info, transfer_ptr);
   DUMP_M_ADDR(box, info, box);
}

static void
dd_dump_transfer_unmap(struct call_transfer_unmap *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M_ADDR(transfer, info, transfer);
   DUMP_M(ptr, info, transfer_ptr);
}

static void
dd_dump_buffer_subdata(struct call_buffer_subdata *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, resource);
   DUMP_M(transfer_usage, info, usage);
   DUMP_M(uint, info, offset);
   DUMP_M(uint, info, size);
   DUMP_M(ptr, info, data);
}

static void
dd_dump_texture_subdata(struct call_texture_subdata *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, resource);
   DUMP_M(uint, info, level);
   DUMP_M(transfer_usage, info, usage);
   DUMP_M_ADDR(box, info, box);
   DUMP_M(ptr, info, data);
   DUMP_M(uint, info, stride);
   DUMP_M(uint, info, layer_stride);
}

static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}

static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}

static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}

static void
dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
{
   if (dctx->pipe->dump_debug_state) {
      fprintf(f,"\n\n**************************************************"
              "***************************\n");
      fprintf(f, "Driver-specific state:\n\n");
      dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
   }
}

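/* Dispatch a recorded call to the matching dumper above. */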
static void
dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
{
   switch (call->type) {
   case CALL_DRAW_VBO:
      dd_dump_draw_vbo(state, &call->info.draw_vbo.draw, f);
      break;
   case CALL_LAUNCH_GRID:
      dd_dump_launch_grid(state, &call->info.launch_grid, f);
      break;
   case CALL_RESOURCE_COPY_REGION:
      dd_dump_resource_copy_region(state,
                                   &call->info.resource_copy_region, f);
      break;
   case CALL_BLIT:
      dd_dump_blit(state, &call->info.blit, f);
      break;
   case CALL_FLUSH_RESOURCE:
      dd_dump_flush_resource(state, call->info.flush_resource, f);
      break;
   case CALL_CLEAR:
      dd_dump_clear(state, &call->info.clear, f);
      break;
   case CALL_CLEAR_BUFFER:
      dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
      break;
   case CALL_CLEAR_TEXTURE:
      dd_dump_clear_texture(state, f);
      break;
   case CALL_CLEAR_RENDER_TARGET:
      dd_dump_clear_render_target(state, f);
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      dd_dump_clear_depth_stencil(state, f);
      break;
   case CALL_GENERATE_MIPMAP:
      dd_dump_generate_mipmap(state, f);
      break;
   case CALL_GET_QUERY_RESULT_RESOURCE:
      dd_dump_get_query_result_resource(&call->info.get_query_result_resource, f);
      break;
   case CALL_TRANSFER_MAP:
      dd_dump_transfer_map(&call->info.transfer_map, f);
      break;
   case CALL_TRANSFER_FLUSH_REGION:
      dd_dump_transfer_flush_region(&call->info.transfer_flush_region, f);
      break;
   case CALL_TRANSFER_UNMAP:
      dd_dump_transfer_unmap(&call->info.transfer_unmap, f);
      break;
   case CALL_BUFFER_SUBDATA:
      dd_dump_buffer_subdata(&call->info.buffer_subdata, f);
      break;
   case CALL_TEXTURE_SUBDATA:
      dd_dump_texture_subdata(&call->info.texture_subdata, f);
      break;
   }
}

static void
dd_kill_process(void)
{
#ifdef PIPE_OS_UNIX
   sync();
#endif
   fprintf(stderr, "dd: Aborting the process...\n");
   fflush(stdout);
   fflush(stderr);
   exit(1);
}

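/* Drop the references that were taken when the call was recorded. */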
static void
dd_unreference_copy_of_call(struct dd_call *dst)
{
   switch (dst->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
      if (dst->info.draw_vbo.draw.index_size &&
          !dst->info.draw_vbo.draw.has_user_indices)
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
      else
         dst->info.draw_vbo.draw.index.user = NULL;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
      pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
      pipe_resource_reference(&dst->info.blit.src.resource, NULL);
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource, NULL);
      break;
   case CALL_CLEAR:
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
      break;
   case CALL_GET_QUERY_RESULT_RESOURCE:
      pipe_resource_reference(&dst->info.get_query_result_resource.resource, NULL);
      break;
   case CALL_TRANSFER_MAP:
      pipe_resource_reference(&dst->info.transfer_map.transfer.resource, NULL);
      break;
   case CALL_TRANSFER_FLUSH_REGION:
      pipe_resource_reference(&dst->info.transfer_flush_region.transfer.resource, NULL);
      break;
   case CALL_TRANSFER_UNMAP:
      pipe_resource_reference(&dst->info.transfer_unmap.transfer.resource, NULL);
      break;
   case CALL_BUFFER_SUBDATA:
      pipe_resource_reference(&dst->info.buffer_subdata.resource, NULL);
      break;
   case CALL_TEXTURE_SUBDATA:
      pipe_resource_reference(&dst->info.texture_subdata.resource, NULL);
      break;
   }
}

static void
dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   unsigned i,j;

   /* Just clear pointers to gallium objects. Don't clear the whole structure,
    * because it would kill performance with its size of 130 KB.
    */
   memset(state->base.vertex_buffers, 0,
          sizeof(state->base.vertex_buffers));
   memset(state->base.so_targets, 0,
          sizeof(state->base.so_targets));
   memset(state->base.constant_buffers, 0,
          sizeof(state->base.constant_buffers));
   memset(state->base.sampler_views, 0,
          sizeof(state->base.sampler_views));
   memset(state->base.shader_images, 0,
          sizeof(state->base.shader_images));
   memset(state->base.shader_buffers, 0,
          sizeof(state->base.shader_buffers));
   memset(&state->base.framebuffer_state, 0,
          sizeof(state->base.framebuffer_state));

   memset(state->shaders, 0, sizeof(state->shaders));

   state->base.render_cond.query = &state->render_cond;

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      state->base.shaders[i] = &state->shaders[i];
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         state->base.sampler_states[i][j] = &state->sampler_states[i][j];
   }

   state->base.velems = &state->velems;
   state->base.rs = &state->rs;
   state->base.dsa = &state->dsa;
   state->base.blend = &state->blend;
}

static void
dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   struct dd_draw_state *dst = &state->base;
   unsigned i,j;

   for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
      pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
   for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
      pipe_so_target_reference(&dst->so_targets[i], NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      if (dst->shaders[i])
         tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);

      for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
         pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
      for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
         pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
      for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
         pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
   }

   util_unreference_framebuffer_state(&dst->framebuffer_state);
}

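/* Snapshot the current draw state into a record copy, taking references on
 * all resources and duplicating shader tokens so the copy stays valid even
 * if the application later destroys the originals.
 */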
static void
dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
{
   unsigned i,j;

   if (src->render_cond.query) {
      *dst->render_cond.query = *src->render_cond.query;
      dst->render_cond.condition = src->render_cond.condition;
      dst->render_cond.mode = src->render_cond.mode;
   } else {
      dst->render_cond.query = NULL;
   }

   for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
      pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
                                   &src->vertex_buffers[i]);
   }

   dst->num_so_targets = src->num_so_targets;
   for (i = 0; i < src->num_so_targets; i++)
      pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
   memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      if (!src->shaders[i]) {
         dst->shaders[i] = NULL;
         continue;
      }

      if (src->shaders[i]) {
         dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
         if (src->shaders[i]->state.shader.tokens) {
            dst->shaders[i]->state.shader.tokens =
               tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
         } else {
            dst->shaders[i]->state.shader.ir.nir = NULL;
         }
      } else {
         dst->shaders[i] = NULL;
      }

      for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
         pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
                                 src->constant_buffers[i][j].buffer);
         memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
                sizeof(src->constant_buffers[i][j]));
      }

      for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
         pipe_sampler_view_reference(&dst->sampler_views[i][j],
                                     src->sampler_views[i][j]);
         if (src->sampler_states[i][j])
            dst->sampler_states[i][j]->state.sampler =
               src->sampler_states[i][j]->state.sampler;
         else
            dst->sampler_states[i][j] = NULL;
      }

      for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
         pipe_resource_reference(&dst->shader_images[i][j].resource,
                                 src->shader_images[i][j].resource);
         memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
                sizeof(src->shader_images[i][j]));
      }

      for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
         pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
                                 src->shader_buffers[i][j].buffer);
         memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
                sizeof(src->shader_buffers[i][j]));
      }
   }

   if (src->velems)
      dst->velems->state.velems = src->velems->state.velems;
   else
      dst->velems = NULL;

   if (src->rs)
      dst->rs->state.rs = src->rs->state.rs;
   else
      dst->rs = NULL;

   if (src->dsa)
      dst->dsa->state.dsa = src->dsa->state.dsa;
   else
      dst->dsa = NULL;

   if (src->blend)
      dst->blend->state.blend = src->blend->state.blend;
   else
      dst->blend = NULL;

   dst->blend_color = src->blend_color;
   dst->stencil_ref = src->stencil_ref;
   dst->sample_mask = src->sample_mask;
   dst->min_samples = src->min_samples;
   dst->clip_state = src->clip_state;
   util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
   memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
   memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
   memcpy(dst->tess_default_levels, src->tess_default_levels,
          sizeof(src->tess_default_levels));
   dst->apitrace_call_number = src->apitrace_call_number;
}

static void
dd_free_record(struct pipe_screen *screen, struct dd_draw_record *record)
{
   u_log_page_destroy(record->log_page);
   dd_unreference_copy_of_call(&record->call);
   dd_unreference_copy_of_draw_state(&record->draw_state);
   screen->fence_reference(screen, &record->prev_bottom_of_pipe, NULL);
   screen->fence_reference(screen, &record->top_of_pipe, NULL);
   screen->fence_reference(screen, &record->bottom_of_pipe, NULL);
   util_queue_fence_destroy(&record->driver_finished);
   FREE(record);
}

static void
dd_write_record(FILE *f, struct dd_draw_record *record)
{
   PRINT_NAMED(ptr, "pipe", record->dctx->pipe);
   PRINT_NAMED(ns, "time before (API call)", record->time_before);
   PRINT_NAMED(ns, "time after (driver done)", record->time_after);
   fprintf(f, "\n");

   dd_dump_call(f, &record->draw_state.base, &record->call);

   if (record->log_page) {
      fprintf(f,"\n\n**************************************************"
              "***************************\n");
      fprintf(f, "Context Log:\n\n");
      u_log_page_print(record->log_page, f);
   }
}

static void
dd_maybe_dump_record(struct dd_screen *dscreen, struct dd_draw_record *record)
{
   if (dscreen->dump_mode == DD_DUMP_ONLY_HANGS ||
       (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
        dscreen->apitrace_dump_call != record->draw_state.base.apitrace_call_number))
      return;

   char name[512];
   dd_get_debug_filename_and_mkdir(name, sizeof(name), dscreen->verbose);
   FILE *f = fopen(name, "w");
   if (!f) {
      fprintf(stderr, "dd: failed to open %s\n", name);
      return;
   }

   dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
   dd_write_record(f, record);

   fclose(f);
}

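/* Return "YES"/"NO " depending on whether the fence has signalled, or "---"
 * if there is no fence; optionally report an unsignalled fence through
 * *not_reached.
 */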
static const char *
dd_fence_state(struct pipe_screen *screen, struct pipe_fence_handle *fence,
               bool *not_reached)
{
   if (!fence)
      return "---";

   bool ok = screen->fence_finish(screen, NULL, fence, 0);

   if (not_reached && !ok)
      *not_reached = true;

   return ok ? "YES" : "NO ";
}

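/* Called when a draw failed to finish in time: print a table of fence states
 * for the pending records, write a dump file per suspect draw (including
 * driver state and dmesg for the first one), then abort the process.
 */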
static void
dd_report_hang(struct dd_context *dctx)
{
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_screen *screen = dscreen->screen;
   bool encountered_hang = false;
   bool stop_output = false;
   unsigned num_later = 0;

   fprintf(stderr, "GPU hang detected, collecting information...\n\n");

   fprintf(stderr, "Draw # driver prev BOP TOP BOP dump file\n"
                   "-------------------------------------------------------------\n");

   list_for_each_entry(struct dd_draw_record, record, &dctx->records, list) {
      if (!encountered_hang &&
          screen->fence_finish(screen, NULL, record->bottom_of_pipe, 0)) {
         dd_maybe_dump_record(dscreen, record);
         continue;
      }

      if (stop_output) {
         dd_maybe_dump_record(dscreen, record);
         num_later++;
         continue;
      }

      bool driver = util_queue_fence_is_signalled(&record->driver_finished);
      bool top_not_reached = false;
      const char *prev_bop = dd_fence_state(screen, record->prev_bottom_of_pipe, NULL);
      const char *top = dd_fence_state(screen, record->top_of_pipe, &top_not_reached);
      const char *bop = dd_fence_state(screen, record->bottom_of_pipe, NULL);

      fprintf(stderr, "%-9u %s %s %s %s ",
              record->draw_call, driver ? "YES" : "NO ", prev_bop, top, bop);

      char name[512];
      dd_get_debug_filename_and_mkdir(name, sizeof(name), false);

      FILE *f = fopen(name, "w");
      if (!f) {
         fprintf(stderr, "fopen failed\n");
      } else {
         fprintf(stderr, "%s\n", name);

         dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
         dd_write_record(f, record);

         if (!encountered_hang) {
            dd_dump_driver_state(dctx, f, PIPE_DUMP_DEVICE_STATUS_REGISTERS);
            dd_dump_dmesg(f);
         }

         fclose(f);
      }

      if (top_not_reached)
         stop_output = true;
      encountered_hang = true;
   }

   if (num_later)
      fprintf(stderr, "... and %u additional draws.\n", num_later);

   fprintf(stderr, "\nDone.\n");
   dd_kill_process();
}

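/* Background thread: repeatedly take the queued records from the context,
 * wait for the youngest draw (with a timeout when hang detection is on),
 * report a hang if the wait fails, and otherwise dump and free the records.
 */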
int
dd_thread_main(void *input)
{
   struct dd_context *dctx = (struct dd_context *)input;
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_screen *screen = dscreen->screen;

   mtx_lock(&dctx->mutex);

   for (;;) {
      struct list_head records;
      list_replace(&dctx->records, &records);
      list_inithead(&dctx->records);
      dctx->num_records = 0;

      if (dctx->api_stalled)
         cnd_signal(&dctx->cond);

      if (list_empty(&records)) {
         if (dctx->kill_thread)
            break;

         cnd_wait(&dctx->cond, &dctx->mutex);
         continue;
      }

      mtx_unlock(&dctx->mutex);

      /* Wait for the youngest draw. This means hangs can take a bit longer
       * to detect, but it's more efficient this way. */
      struct dd_draw_record *youngest =
         list_last_entry(&records, struct dd_draw_record, list);

      if (dscreen->timeout_ms > 0) {
         uint64_t abs_timeout = os_time_get_absolute_timeout(
            (uint64_t)dscreen->timeout_ms * 1000*1000);

         if (!util_queue_fence_wait_timeout(&youngest->driver_finished, abs_timeout) ||
             !screen->fence_finish(screen, NULL, youngest->bottom_of_pipe,
                                   (uint64_t)dscreen->timeout_ms * 1000*1000)) {
            mtx_lock(&dctx->mutex);
            list_splice(&records, &dctx->records);
            dd_report_hang(dctx);
            /* we won't actually get here */
            mtx_unlock(&dctx->mutex);
         }
      } else {
         util_queue_fence_wait(&youngest->driver_finished);
      }

      list_for_each_entry_safe(struct dd_draw_record, record, &records, list) {
         dd_maybe_dump_record(dscreen, record);
         list_del(&record->list);
         dd_free_record(screen, record);
      }

      mtx_lock(&dctx->mutex);
   }
   mtx_unlock(&dctx->mutex);
   return 0;
}

static struct dd_draw_record *
dd_create_record(struct dd_context *dctx)
{
   struct dd_draw_record *record;

   record = MALLOC_STRUCT(dd_draw_record);
   if (!record)
      return NULL;

   record->dctx = dctx;
   record->draw_call = dctx->num_draw_calls;

   record->prev_bottom_of_pipe = NULL;
   record->top_of_pipe = NULL;
   record->bottom_of_pipe = NULL;
   record->log_page = NULL;
   util_queue_fence_init(&record->driver_finished);
   util_queue_fence_reset(&record->driver_finished);

   dd_init_copy_of_draw_state(&record->draw_state);
   dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);

   return record;
}

static void
dd_context_flush(struct pipe_context *_pipe,
                 struct pipe_fence_handle **fence, unsigned flags)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;

   pipe->flush(pipe, fence, flags);
}

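/* Called right before each recorded call: take a timestamp, emit the
 * top/bottom-of-pipe fences used for hang detection, and queue the record
 * for the background thread (stalling the API thread if it runs too far
 * ahead).
 */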
static void
dd_before_draw(struct dd_context *dctx, struct dd_draw_record *record)
{
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_context *pipe = dctx->pipe;
   struct pipe_screen *screen = dscreen->screen;

   record->time_before = os_time_get_nano();

   if (dscreen->timeout_ms > 0) {
      if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count) {
         pipe->flush(pipe, &record->prev_bottom_of_pipe, 0);
         screen->fence_reference(screen, &record->top_of_pipe, record->prev_bottom_of_pipe);
      } else {
         pipe->flush(pipe, &record->prev_bottom_of_pipe,
                     PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE);
         pipe->flush(pipe, &record->top_of_pipe,
                     PIPE_FLUSH_DEFERRED | PIPE_FLUSH_TOP_OF_PIPE);
      }
   } else if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count) {
      pipe->flush(pipe, NULL, 0);
   }

   mtx_lock(&dctx->mutex);
   if (unlikely(dctx->num_records > 10000)) {
      dctx->api_stalled = true;
      /* Since this is only a heuristic to prevent the API thread from getting
       * too far ahead, we don't need a loop here. */
      cnd_wait(&dctx->cond, &dctx->mutex);
      dctx->api_stalled = false;
   }

   if (list_empty(&dctx->records))
      cnd_signal(&dctx->cond);

   list_addtail(&record->list, &dctx->records);
   dctx->num_records++;
   mtx_unlock(&dctx->mutex);
}

static void
dd_after_draw_async(void *data)
{
   struct dd_draw_record *record = (struct dd_draw_record *)data;
   struct dd_context *dctx = record->dctx;
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);

   record->log_page = u_log_new_page(&dctx->log);
   record->time_after = os_time_get_nano();

   util_queue_fence_signal(&record->driver_finished);

   if (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
       dscreen->apitrace_dump_call > dctx->draw_state.apitrace_call_number) {
      dd_thread_join(dctx);
      /* No need to continue. */
      exit(0);
   }
}

static void
dd_after_draw(struct dd_context *dctx, struct dd_draw_record *record)
{
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);
   struct pipe_context *pipe = dctx->pipe;

   if (dscreen->timeout_ms > 0) {
      unsigned flush_flags;
      if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count)
         flush_flags = 0;
      else
         flush_flags = PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE;
      pipe->flush(pipe, &record->bottom_of_pipe, flush_flags);
   }

   if (pipe->callback) {
      pipe->callback(pipe, dd_after_draw_async, record, true);
   } else {
      dd_after_draw_async(record);
   }

   ++dctx->num_draw_calls;
   if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
      fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
              dctx->num_draw_calls);
}

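/* The wrapped pipe_context entry points below all follow the same pattern:
 * create a record, copy the call parameters (referencing any resources so
 * they outlive the call), then bracket the real driver call with
 * dd_before_draw()/dd_after_draw().
 */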
static void
dd_context_draw_vbo(struct pipe_context *_pipe,
                    const struct pipe_draw_info *info)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_DRAW_VBO;
   record->call.info.draw_vbo.draw = *info;
   record->call.info.draw_vbo.draw.count_from_stream_output = NULL;
   pipe_so_target_reference(&record->call.info.draw_vbo.draw.count_from_stream_output,
                            info->count_from_stream_output);
   if (info->index_size && !info->has_user_indices) {
      record->call.info.draw_vbo.draw.index.resource = NULL;
      pipe_resource_reference(&record->call.info.draw_vbo.draw.index.resource,
                              info->index.resource);
   }

   if (info->indirect) {
      record->call.info.draw_vbo.indirect = *info->indirect;
      record->call.info.draw_vbo.draw.indirect = &record->call.info.draw_vbo.indirect;

      record->call.info.draw_vbo.indirect.buffer = NULL;
      pipe_resource_reference(&record->call.info.draw_vbo.indirect.buffer,
                              info->indirect->buffer);
      record->call.info.draw_vbo.indirect.indirect_draw_count = NULL;
      pipe_resource_reference(&record->call.info.draw_vbo.indirect.indirect_draw_count,
                              info->indirect->indirect_draw_count);
   } else {
      memset(&record->call.info.draw_vbo.indirect, 0, sizeof(*info->indirect));
   }

   dd_before_draw(dctx, record);
   pipe->draw_vbo(pipe, info);
   dd_after_draw(dctx, record);
}

static void
dd_context_launch_grid(struct pipe_context *_pipe,
                       const struct pipe_grid_info *info)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_LAUNCH_GRID;
   record->call.info.launch_grid = *info;
   record->call.info.launch_grid.indirect = NULL;
   pipe_resource_reference(&record->call.info.launch_grid.indirect, info->indirect);

   dd_before_draw(dctx, record);
   pipe->launch_grid(pipe, info);
   dd_after_draw(dctx, record);
}

static void
dd_context_resource_copy_region(struct pipe_context *_pipe,
                                struct pipe_resource *dst, unsigned dst_level,
                                unsigned dstx, unsigned dsty, unsigned dstz,
                                struct pipe_resource *src, unsigned src_level,
                                const struct pipe_box *src_box)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_RESOURCE_COPY_REGION;
   record->call.info.resource_copy_region.dst = NULL;
   pipe_resource_reference(&record->call.info.resource_copy_region.dst, dst);
   record->call.info.resource_copy_region.dst_level = dst_level;
   record->call.info.resource_copy_region.dstx = dstx;
   record->call.info.resource_copy_region.dsty = dsty;
   record->call.info.resource_copy_region.dstz = dstz;
   record->call.info.resource_copy_region.src = NULL;
   pipe_resource_reference(&record->call.info.resource_copy_region.src, src);
   record->call.info.resource_copy_region.src_level = src_level;
   record->call.info.resource_copy_region.src_box = *src_box;

   dd_before_draw(dctx, record);
   pipe->resource_copy_region(pipe,
                              dst, dst_level, dstx, dsty, dstz,
                              src, src_level, src_box);
   dd_after_draw(dctx, record);
}

static void
dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_BLIT;
   record->call.info.blit = *info;
   record->call.info.blit.dst.resource = NULL;
   pipe_resource_reference(&record->call.info.blit.dst.resource, info->dst.resource);
   record->call.info.blit.src.resource = NULL;
   pipe_resource_reference(&record->call.info.blit.src.resource, info->src.resource);

   dd_before_draw(dctx, record);
   pipe->blit(pipe, info);
   dd_after_draw(dctx, record);
}

static boolean
dd_context_generate_mipmap(struct pipe_context *_pipe,
                           struct pipe_resource *res,
                           enum pipe_format format,
                           unsigned base_level,
                           unsigned last_level,
                           unsigned first_layer,
                           unsigned last_layer)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);
   boolean result;

   record->call.type = CALL_GENERATE_MIPMAP;
   record->call.info.generate_mipmap.res = NULL;
   pipe_resource_reference(&record->call.info.generate_mipmap.res, res);
   record->call.info.generate_mipmap.format = format;
   record->call.info.generate_mipmap.base_level = base_level;
   record->call.info.generate_mipmap.last_level = last_level;
   record->call.info.generate_mipmap.first_layer = first_layer;
   record->call.info.generate_mipmap.last_layer = last_layer;

   dd_before_draw(dctx, record);
   result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
                                  first_layer, last_layer);
   dd_after_draw(dctx, record);
   return result;
}

static void
dd_context_get_query_result_resource(struct pipe_context *_pipe,
                                     struct pipe_query *query,
                                     boolean wait,
                                     enum pipe_query_value_type result_type,
                                     int index,
                                     struct pipe_resource *resource,
                                     unsigned offset)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct dd_query *dquery = dd_query(query);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_GET_QUERY_RESULT_RESOURCE;
   record->call.info.get_query_result_resource.query = query;
   record->call.info.get_query_result_resource.wait = wait;
   record->call.info.get_query_result_resource.result_type = result_type;
   record->call.info.get_query_result_resource.index = index;
   record->call.info.get_query_result_resource.resource = NULL;
   pipe_resource_reference(&record->call.info.get_query_result_resource.resource,
                           resource);
   record->call.info.get_query_result_resource.offset = offset;

   /* The query may be deleted by the time we need to print it. */
   record->call.info.get_query_result_resource.query_type = dquery->type;

   dd_before_draw(dctx, record);
   pipe->get_query_result_resource(pipe, dquery->query, wait,
                                   result_type, index, resource, offset);
   dd_after_draw(dctx, record);
}

static void
dd_context_flush_resource(struct pipe_context *_pipe,
                          struct pipe_resource *resource)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_FLUSH_RESOURCE;
   record->call.info.flush_resource = NULL;
   pipe_resource_reference(&record->call.info.flush_resource, resource);

   dd_before_draw(dctx, record);
   pipe->flush_resource(pipe, resource);
   dd_after_draw(dctx, record);
}

static void
dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
                 const union pipe_color_union *color, double depth,
                 unsigned stencil)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_CLEAR;
   record->call.info.clear.buffers = buffers;
   record->call.info.clear.color = *color;
   record->call.info.clear.depth = depth;
   record->call.info.clear.stencil = stencil;

   dd_before_draw(dctx, record);
   pipe->clear(pipe, buffers, color, depth, stencil);
   dd_after_draw(dctx, record);
}

static void
dd_context_clear_render_target(struct pipe_context *_pipe,
                               struct pipe_surface *dst,
                               const union pipe_color_union *color,
                               unsigned dstx, unsigned dsty,
                               unsigned width, unsigned height,
                               bool render_condition_enabled)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_CLEAR_RENDER_TARGET;

   dd_before_draw(dctx, record);
   pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
                             render_condition_enabled);
   dd_after_draw(dctx, record);
}

static void
dd_context_clear_depth_stencil(struct pipe_context *_pipe,
                               struct pipe_surface *dst, unsigned clear_flags,
                               double depth, unsigned stencil, unsigned dstx,
                               unsigned dsty, unsigned width, unsigned height,
                               bool render_condition_enabled)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_CLEAR_DEPTH_STENCIL;

   dd_before_draw(dctx, record);
   pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
                             dstx, dsty, width, height,
                             render_condition_enabled);
   dd_after_draw(dctx, record);
}

static void
dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
                        unsigned offset, unsigned size,
                        const void *clear_value, int clear_value_size)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_CLEAR_BUFFER;
   record->call.info.clear_buffer.res = NULL;
   pipe_resource_reference(&record->call.info.clear_buffer.res, res);
   record->call.info.clear_buffer.offset = offset;
   record->call.info.clear_buffer.size = size;
   record->call.info.clear_buffer.clear_value = clear_value;
   record->call.info.clear_buffer.clear_value_size = clear_value_size;

   dd_before_draw(dctx, record);
   pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
   dd_after_draw(dctx, record);
}

static void
dd_context_clear_texture(struct pipe_context *_pipe,
                         struct pipe_resource *res,
                         unsigned level,
                         const struct pipe_box *box,
                         const void *data)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record = dd_create_record(dctx);

   record->call.type = CALL_CLEAR_TEXTURE;

   dd_before_draw(dctx, record);
   pipe->clear_texture(pipe, res, level, box, data);
   dd_after_draw(dctx, record);
}

/********************************************************************
 * transfer
 */

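/* Unlike draws, transfers and *_subdata calls are only recorded when
 * dd_screen::transfers is set; otherwise they are passed straight through
 * to the driver.
 */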
static void *
dd_context_transfer_map(struct pipe_context *_pipe,
                        struct pipe_resource *resource, unsigned level,
                        unsigned usage, const struct pipe_box *box,
                        struct pipe_transfer **transfer)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record =
      dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;

   if (record) {
      record->call.type = CALL_TRANSFER_MAP;

      dd_before_draw(dctx, record);
   }
   void *ptr = pipe->transfer_map(pipe, resource, level, usage, box, transfer);
   if (record) {
      record->call.info.transfer_map.transfer_ptr = *transfer;
      record->call.info.transfer_map.ptr = ptr;
      if (*transfer) {
         record->call.info.transfer_map.transfer = **transfer;
         record->call.info.transfer_map.transfer.resource = NULL;
         pipe_resource_reference(&record->call.info.transfer_map.transfer.resource,
                                 (*transfer)->resource);
      } else {
         memset(&record->call.info.transfer_map.transfer, 0, sizeof(struct pipe_transfer));
      }

      dd_after_draw(dctx, record);
   }
   return ptr;
}

static void
dd_context_transfer_flush_region(struct pipe_context *_pipe,
                                 struct pipe_transfer *transfer,
                                 const struct pipe_box *box)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record =
      dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;

   if (record) {
      record->call.type = CALL_TRANSFER_FLUSH_REGION;
      record->call.info.transfer_flush_region.transfer_ptr = transfer;
      record->call.info.transfer_flush_region.box = *box;
      record->call.info.transfer_flush_region.transfer = *transfer;
      record->call.info.transfer_flush_region.transfer.resource = NULL;
      pipe_resource_reference(
         &record->call.info.transfer_flush_region.transfer.resource,
         transfer->resource);

      dd_before_draw(dctx, record);
   }
   pipe->transfer_flush_region(pipe, transfer, box);
   if (record)
      dd_after_draw(dctx, record);
}

static void
dd_context_transfer_unmap(struct pipe_context *_pipe,
                          struct pipe_transfer *transfer)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record =
      dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;

   if (record) {
      record->call.type = CALL_TRANSFER_UNMAP;
      record->call.info.transfer_unmap.transfer_ptr = transfer;
      record->call.info.transfer_unmap.transfer = *transfer;
      record->call.info.transfer_unmap.transfer.resource = NULL;
      pipe_resource_reference(
         &record->call.info.transfer_unmap.transfer.resource,
         transfer->resource);

      dd_before_draw(dctx, record);
   }
   pipe->transfer_unmap(pipe, transfer);
   if (record)
      dd_after_draw(dctx, record);
}

static void
dd_context_buffer_subdata(struct pipe_context *_pipe,
                          struct pipe_resource *resource,
                          unsigned usage, unsigned offset,
                          unsigned size, const void *data)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record =
      dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;

   if (record) {
      record->call.type = CALL_BUFFER_SUBDATA;
      record->call.info.buffer_subdata.resource = NULL;
      pipe_resource_reference(&record->call.info.buffer_subdata.resource, resource);
      record->call.info.buffer_subdata.usage = usage;
      record->call.info.buffer_subdata.offset = offset;
      record->call.info.buffer_subdata.size = size;
      record->call.info.buffer_subdata.data = data;

      dd_before_draw(dctx, record);
   }
   pipe->buffer_subdata(pipe, resource, usage, offset, size, data);
   if (record)
      dd_after_draw(dctx, record);
}

static void
dd_context_texture_subdata(struct pipe_context *_pipe,
                           struct pipe_resource *resource,
                           unsigned level, unsigned usage,
                           const struct pipe_box *box,
                           const void *data, unsigned stride,
                           unsigned layer_stride)
{
   struct dd_context *dctx = dd_context(_pipe);
   struct pipe_context *pipe = dctx->pipe;
   struct dd_draw_record *record =
      dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;

   if (record) {
      record->call.type = CALL_TEXTURE_SUBDATA;
      record->call.info.texture_subdata.resource = NULL;
      pipe_resource_reference(&record->call.info.texture_subdata.resource, resource);
      record->call.info.texture_subdata.level = level;
      record->call.info.texture_subdata.usage = usage;
      record->call.info.texture_subdata.box = *box;
      record->call.info.texture_subdata.data = data;
      record->call.info.texture_subdata.stride = stride;
      record->call.info.texture_subdata.layer_stride = layer_stride;

      dd_before_draw(dctx, record);
   }
   pipe->texture_subdata(pipe, resource, level, usage, box, data,
                         stride, layer_stride);
   if (record)
      dd_after_draw(dctx, record);
}

void
dd_init_draw_functions(struct dd_context *dctx)
{
   CTX_INIT(flush);
   CTX_INIT(draw_vbo);
   CTX_INIT(launch_grid);
   CTX_INIT(resource_copy_region);
   CTX_INIT(blit);
   CTX_INIT(clear);
   CTX_INIT(clear_render_target);
   CTX_INIT(clear_depth_stencil);
   CTX_INIT(clear_buffer);
   CTX_INIT(clear_texture);
   CTX_INIT(flush_resource);
   CTX_INIT(generate_mipmap);
   CTX_INIT(get_query_result_resource);
   CTX_INIT(transfer_map);
   CTX_INIT(transfer_flush_region);
   CTX_INIT(transfer_unmap);
   CTX_INIT(buffer_subdata);
   CTX_INIT(texture_subdata);
}