gallium: remove pipe_index_buffer and set_index_buffer
[mesa.git] / src / gallium / drivers / ddebug / dd_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "dd_pipe.h"
29
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "os/os_time.h"
39 #include <inttypes.h>
40
41
/* Open the debug log stream and write a header identifying the process
 * command line, driver, and device.  Returns NULL if no log file is
 * available (dd_get_debug_file failed).  The caller owns the stream and
 * must close it with dd_close_file_stream().
 */
static FILE *
dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
{
   struct pipe_screen *screen = dscreen->screen;
   char cmd_line[4096];

   FILE *f = dd_get_debug_file(dscreen->verbose);
   if (!f)
      return NULL;

   if (os_get_command_line(cmd_line, sizeof(cmd_line)))
      fprintf(f, "Command: %s\n", cmd_line);
   fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
   fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
   fprintf(f, "Device name: %s\n\n", screen->get_name(screen));

   /* Only printed when an apitrace call number was captured (non-zero). */
   if (apitrace_call_number)
      fprintf(f, "Last apitrace call: %u\n\n",
              apitrace_call_number);
   return f;
}
63
/* Append the most recent kernel log messages (last 60 lines of dmesg) to
 * the report stream.  Silently does nothing if the pipe can't be opened.
 */
static void
dd_dump_dmesg(FILE *f)
{
   FILE *cmd = popen("dmesg | tail -n60", "r");
   char buf[2000];

   if (!cmd)
      return;

   fprintf(f, "\nLast 60 lines of dmesg:\n\n");
   for (;;) {
      if (!fgets(buf, sizeof(buf), cmd))
         break;
      fputs(buf, f);
   }

   pclose(cmd);
}
79
/* Counterpart of dd_get_file_stream(). */
static void
dd_close_file_stream(FILE *f)
{
   fclose(f);
}
85
/* Return how many viewports are worth dumping: PIPE_MAX_VIEWPORTS if the
 * last pre-rasterization shader stage writes the viewport index, else 1.
 */
static unsigned
dd_num_active_viewports(struct dd_draw_state *dstate)
{
   struct tgsi_shader_info info;
   const struct tgsi_token *tokens;

   /* The last enabled geometry-processing stage determines the viewport. */
   if (dstate->shaders[PIPE_SHADER_GEOMETRY])
      tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
      tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
   else if (dstate->shaders[PIPE_SHADER_VERTEX])
      tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
   else
      return 1;

   tgsi_scan_shader(tokens, &info);
   return info.writes_viewport_index ? PIPE_MAX_VIEWPORTS : 1;
}
104
/* ANSI terminal escape sequences used to colorize sections of the dump. */
#define COLOR_RESET "\033[0m"
#define COLOR_SHADER "\033[1;32m"
#define COLOR_STATE "\033[1;33m"

/* Dump a named state object through the matching util_dump_<name> helper.
 * All of these macros expect a FILE* named "f" in the calling scope. */
#define DUMP(name, var) do { \
   fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Like DUMP, but prints an index — for array-like state slots. */
#define DUMP_I(name, var, i) do { \
   fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
   util_dump_##name(f, var); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a struct member passed by value. */
#define DUMP_M(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, (var)->member); \
   fprintf(f, "\n"); \
} while(0)

/* Dump a struct member passed by address (for helpers taking a pointer). */
#define DUMP_M_ADDR(name, var, member) do { \
   fprintf(f, " " #member ": "); \
   util_dump_##name(f, &(var)->member); \
   fprintf(f, "\n"); \
} while(0)
132
133 static void
134 print_named_value(FILE *f, const char *name, int value)
135 {
136 fprintf(f, COLOR_STATE "%s" COLOR_RESET " = %i\n", name, value);
137 }
138
139 static void
140 print_named_xvalue(FILE *f, const char *name, int value)
141 {
142 fprintf(f, COLOR_STATE "%s" COLOR_RESET " = 0x%08x\n", name, value);
143 }
144
/* Write an unsigned integer in decimal, no trailing whitespace. */
static void
util_dump_uint(FILE *f, unsigned value)
{
   char buf[16];   /* 32-bit unsigned needs at most 10 digits + NUL */

   snprintf(buf, sizeof(buf), "%u", value);
   fputs(buf, f);
}
150
/* Write an unsigned integer in "0x..." hexadecimal form. */
static void
util_dump_hex(FILE *f, unsigned value)
{
   char buf[16];   /* "0x" + 8 hex digits + NUL fits easily */

   snprintf(buf, sizeof(buf), "0x%x", value);
   fputs(buf, f);
}
156
/* Write a double with printf's default "%f" formatting (6 decimals). */
static void
util_dump_double(FILE *f, double d)
{
   fprintf(f, "%f", d);
}
162
163 static void
164 util_dump_format(FILE *f, enum pipe_format format)
165 {
166 fprintf(f, "%s", util_format_name(format));
167 }
168
169 static void
170 util_dump_color_union(FILE *f, const union pipe_color_union *color)
171 {
172 fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
173 color->f[0], color->f[1], color->f[2], color->f[3],
174 color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
175 }
176
/* Print a query's type.  Driver-specific query types have no generic name,
 * so they are shown as an offset from PIPE_QUERY_DRIVER_SPECIFIC. */
static void
util_dump_query(FILE *f, struct dd_query *query)
{
   if (query->type >= PIPE_QUERY_DRIVER_SPECIFIC)
      fprintf(f, "PIPE_QUERY_DRIVER_SPECIFIC + %i",
              query->type - PIPE_QUERY_DRIVER_SPECIFIC);
   else
      fprintf(f, "%s", util_dump_query_type(query->type, false));
}
186
/* Dump the current conditional-rendering state, if a query is bound. */
static void
dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
{
   if (dstate->render_cond.query) {
      fprintf(f, "render condition:\n");
      DUMP_M(query, &dstate->render_cond, query);
      DUMP_M(uint, &dstate->render_cond, condition);
      DUMP_M(uint, &dstate->render_cond, mode);
      fprintf(f, "\n");
   }
}
198
199 static void
200 dd_dump_shader(struct dd_draw_state *dstate, enum pipe_shader_type sh, FILE *f)
201 {
202 int i;
203 const char *shader_str[PIPE_SHADER_TYPES];
204
205 shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
206 shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
207 shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
208 shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
209 shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
210 shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";
211
212 if (sh == PIPE_SHADER_TESS_CTRL &&
213 !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
214 dstate->shaders[PIPE_SHADER_TESS_EVAL])
215 fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
216 "default_inner_level = {%f, %f}}\n",
217 dstate->tess_default_levels[0],
218 dstate->tess_default_levels[1],
219 dstate->tess_default_levels[2],
220 dstate->tess_default_levels[3],
221 dstate->tess_default_levels[4],
222 dstate->tess_default_levels[5]);
223
224 if (sh == PIPE_SHADER_FRAGMENT)
225 if (dstate->rs) {
226 unsigned num_viewports = dd_num_active_viewports(dstate);
227
228 if (dstate->rs->state.rs.clip_plane_enable)
229 DUMP(clip_state, &dstate->clip_state);
230
231 for (i = 0; i < num_viewports; i++)
232 DUMP_I(viewport_state, &dstate->viewports[i], i);
233
234 if (dstate->rs->state.rs.scissor)
235 for (i = 0; i < num_viewports; i++)
236 DUMP_I(scissor_state, &dstate->scissors[i], i);
237
238 DUMP(rasterizer_state, &dstate->rs->state.rs);
239
240 if (dstate->rs->state.rs.poly_stipple_enable)
241 DUMP(poly_stipple, &dstate->polygon_stipple);
242 fprintf(f, "\n");
243 }
244
245 if (!dstate->shaders[sh])
246 return;
247
248 fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
249 DUMP(shader_state, &dstate->shaders[sh]->state.shader);
250
251 for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
252 if (dstate->constant_buffers[sh][i].buffer ||
253 dstate->constant_buffers[sh][i].user_buffer) {
254 DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
255 if (dstate->constant_buffers[sh][i].buffer)
256 DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
257 }
258
259 for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
260 if (dstate->sampler_states[sh][i])
261 DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);
262
263 for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
264 if (dstate->sampler_views[sh][i]) {
265 DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
266 DUMP_M(resource, dstate->sampler_views[sh][i], texture);
267 }
268
269 for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
270 if (dstate->shader_images[sh][i].resource) {
271 DUMP_I(image_view, &dstate->shader_images[sh][i], i);
272 if (dstate->shader_images[sh][i].resource)
273 DUMP_M(resource, &dstate->shader_images[sh][i], resource);
274 }
275
276 for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
277 if (dstate->shader_buffers[sh][i].buffer) {
278 DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
279 if (dstate->shader_buffers[sh][i].buffer)
280 DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
281 }
282
283 fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
284 }
285
/* Dump a complete draw call: the pipe_draw_info plus everything currently
 * bound — render condition, vertex buffers/elements, stream-output targets,
 * every non-compute shader stage, DSA/blend state, and the framebuffer.
 */
static void
dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
{
   int sh, i;

   DUMP(draw_info, info);
   if (info->count_from_stream_output)
      DUMP_M(stream_output_target, info,
             count_from_stream_output);
   if (info->indirect) {
      DUMP_M(resource, info, indirect->buffer);
      if (info->indirect->indirect_draw_count)
         DUMP_M(resource, info, indirect->indirect_draw_count);
   }

   fprintf(f, "\n");

   /* TODO: dump active queries */

   dd_dump_render_condition(dstate, f);

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      if (dstate->vertex_buffers[i].buffer.resource) {
         DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
         /* user buffers have no pipe_resource to describe */
         if (!dstate->vertex_buffers[i].is_user_buffer)
            DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
      }

   if (dstate->velems) {
      print_named_value(f, "num vertex elements",
                        dstate->velems->state.velems.count);
      for (i = 0; i < dstate->velems->state.velems.count; i++) {
         fprintf(f, " ");
         DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
      }
   }

   print_named_value(f, "num stream output targets", dstate->num_so_targets);
   for (i = 0; i < dstate->num_so_targets; i++)
      if (dstate->so_targets[i]) {
         DUMP_I(stream_output_target, dstate->so_targets[i], i);
         DUMP_M(resource, dstate->so_targets[i], buffer);
         fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
      }

   fprintf(f, "\n");
   /* Compute state is dumped by dd_dump_launch_grid, not draws. */
   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (sh == PIPE_SHADER_COMPUTE)
         continue;

      dd_dump_shader(dstate, sh, f);
   }

   if (dstate->dsa)
      DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
   DUMP(stencil_ref, &dstate->stencil_ref);

   if (dstate->blend)
      DUMP(blend_state, &dstate->blend->state.blend);
   DUMP(blend_color, &dstate->blend_color);

   print_named_value(f, "min_samples", dstate->min_samples);
   print_named_xvalue(f, "sample_mask", dstate->sample_mask);
   fprintf(f, "\n");

   DUMP(framebuffer_state, &dstate->framebuffer_state);
   for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
      if (dstate->framebuffer_state.cbufs[i]) {
         fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
         DUMP(surface, dstate->framebuffer_state.cbufs[i]);
         fprintf(f, " ");
         DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
      }
   if (dstate->framebuffer_state.zsbuf) {
      fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
      DUMP(surface, dstate->framebuffer_state.zsbuf);
      fprintf(f, " ");
      DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
   }
   fprintf(f, "\n");
}
367
/* Dump a compute dispatch: the grid info plus the compute shader state.
 * "__func__+8" skips the "dd_dump_" prefix in the printed header. */
static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(grid_info, info);
   fprintf(f, "\n");

   dd_dump_shader(dstate, PIPE_SHADER_COMPUTE, f);
   fprintf(f, "\n");
}
378
/* Dump the parameters of a resource_copy_region call. */
static void
dd_dump_resource_copy_region(struct dd_draw_state *dstate,
                             struct call_resource_copy_region *info,
                             FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst);
   DUMP_M(uint, info, dst_level);
   DUMP_M(uint, info, dstx);
   DUMP_M(uint, info, dsty);
   DUMP_M(uint, info, dstz);
   DUMP_M(resource, info, src);
   DUMP_M(uint, info, src_level);
   DUMP_M_ADDR(box, info, src_box);
}
394
/* Dump the parameters of a blit, plus the render condition when the blit
 * honors it. */
static void
dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, dst.resource);
   DUMP_M(uint, info, dst.level);
   DUMP_M_ADDR(box, info, dst.box);
   DUMP_M(format, info, dst.format);

   DUMP_M(resource, info, src.resource);
   DUMP_M(uint, info, src.level);
   DUMP_M_ADDR(box, info, src.box);
   DUMP_M(format, info, src.format);

   DUMP_M(hex, info, mask);
   DUMP_M(uint, info, filter);
   DUMP_M(uint, info, scissor_enable);
   DUMP_M_ADDR(scissor_state, info, scissor);
   DUMP_M(uint, info, render_condition_enable);

   if (info->render_condition_enable)
      dd_dump_render_condition(dstate, f);
}
418
/* Placeholder: generate_mipmap parameters are not dumped yet. */
static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
425
/* Dump the resource passed to flush_resource. */
static void
dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
                       FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP(resource, res);
}
433
/* Dump the parameters of a framebuffer clear. */
static void
dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(uint, info, buffers);
   DUMP_M_ADDR(color_union, info, color);
   DUMP_M(double, info, depth);
   DUMP_M(hex, info, stencil);
}
443
/* Dump the parameters of a clear_buffer call, including a hex dump of the
 * fill pattern bytes. */
static void
dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
                     FILE *f)
{
   int i;
   const char *value = (const char*)info->clear_value;

   fprintf(f, "%s:\n", __func__+8);
   DUMP_M(resource, info, res);
   DUMP_M(uint, info, offset);
   DUMP_M(uint, info, size);
   DUMP_M(uint, info, clear_value_size);

   /* Print the raw pattern byte by byte; its interpretation depends on the
    * buffer contents, which are unknown here. */
   fprintf(f, " clear_value:");
   for (i = 0; i < info->clear_value_size; i++)
      fprintf(f, " %02x", value[i]);
   fprintf(f, "\n");
}
462
/* Placeholder: clear_texture parameters are not dumped yet. */
static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
469
/* Placeholder: clear_render_target parameters are not dumped yet. */
static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
476
/* Placeholder: clear_depth_stencil parameters are not dumped yet. */
static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
{
   fprintf(f, "%s:\n", __func__+8);
   /* TODO */
}
483
/* Ask the wrapped driver to dump its own internal state, if it implements
 * the optional dump_debug_state hook.  "flags" is a mask of PIPE_DUMP_*. */
static void
dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
{
   if (dctx->pipe->dump_debug_state) {
      fprintf(f,"\n\n**************************************************"
                "***************************\n");
      fprintf(f, "Driver-specific state:\n\n");
      dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
   }
}
494
/* Dispatch to the per-call-type dump routine.  No default case on purpose:
 * the compiler warns if a new CALL_* enum value is left unhandled. */
static void
dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
{
   switch (call->type) {
   case CALL_DRAW_VBO:
      dd_dump_draw_vbo(state, &call->info.draw_vbo.draw, f);
      break;
   case CALL_LAUNCH_GRID:
      dd_dump_launch_grid(state, &call->info.launch_grid, f);
      break;
   case CALL_RESOURCE_COPY_REGION:
      dd_dump_resource_copy_region(state,
                                   &call->info.resource_copy_region, f);
      break;
   case CALL_BLIT:
      dd_dump_blit(state, &call->info.blit, f);
      break;
   case CALL_FLUSH_RESOURCE:
      dd_dump_flush_resource(state, call->info.flush_resource, f);
      break;
   case CALL_CLEAR:
      dd_dump_clear(state, &call->info.clear, f);
      break;
   case CALL_CLEAR_BUFFER:
      dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
      break;
   case CALL_CLEAR_TEXTURE:
      dd_dump_clear_texture(state, f);
      break;
   case CALL_CLEAR_RENDER_TARGET:
      dd_dump_clear_render_target(state, f);
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      dd_dump_clear_depth_stencil(state, f);
      break;
   case CALL_GENERATE_MIPMAP:
      dd_dump_generate_mipmap(state, f);
      break;
   }
}
535
/* Write a full report for one call: the call itself, driver state selected
 * by "flags", and optionally the kernel log.  Silently returns if no log
 * file is available. */
static void
dd_write_report(struct dd_context *dctx, struct dd_call *call, unsigned flags,
                bool dump_dmesg)
{
   FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                dctx->draw_state.apitrace_call_number);

   if (!f)
      return;

   dd_dump_call(f, &dctx->draw_state, call);
   dd_dump_driver_state(dctx, f, flags);
   if (dump_dmesg)
      dd_dump_dmesg(f);
   dd_close_file_stream(f);
}
552
/* Abort the process after a detected GPU hang, flushing all buffered
 * output first so the written reports survive. */
static void
dd_kill_process(void)
{
   sync();   /* flush filesystem buffers so log files reach disk */
   fprintf(stderr, "dd: Aborting the process...\n");
   fflush(stdout);
   fflush(stderr);
   exit(1);
}
562
/* Flush the wrapped context and wait on the resulting fence with the
 * configured timeout.  Returns true if the GPU appears hung (fence did not
 * signal in time).  If "flush_fence" is non-NULL it receives a reference
 * to the flush fence. */
static bool
dd_flush_and_check_hang(struct dd_context *dctx,
                        struct pipe_fence_handle **flush_fence,
                        unsigned flush_flags)
{
   struct pipe_fence_handle *fence = NULL;
   struct pipe_context *pipe = dctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   uint64_t timeout_ms = dd_screen(dctx->base.screen)->timeout_ms;
   bool idle;

   assert(timeout_ms > 0);

   pipe->flush(pipe, &fence, flush_flags);
   if (flush_fence)
      screen->fence_reference(screen, flush_fence, fence);
   /* No fence returned: nothing to wait on, assume not hung. */
   if (!fence)
      return false;

   /* fence_finish takes nanoseconds; timeout_ms is milliseconds. */
   idle = screen->fence_finish(screen, pipe, fence, timeout_ms * 1000000);
   screen->fence_reference(screen, &fence, NULL);
   if (!idle)
      fprintf(stderr, "dd: GPU hang detected!\n");
   return !idle;
}
588
/* Flush, and if the GPU hangs: write a full hang report (driver state and
 * dmesg) and kill the process.  Does not return after a hang. */
static void
dd_flush_and_handle_hang(struct dd_context *dctx,
                         struct pipe_fence_handle **fence, unsigned flags,
                         const char *cause)
{
   if (dd_flush_and_check_hang(dctx, fence, flags)) {
      FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
                                   dctx->draw_state.apitrace_call_number);

      if (f) {
         fprintf(f, "dd: %s.\n", cause);
         dd_dump_driver_state(dctx, f,
                              PIPE_DUMP_DEVICE_STATUS_REGISTERS |
                              PIPE_DUMP_CURRENT_STATES |
                              PIPE_DUMP_CURRENT_SHADERS |
                              PIPE_DUMP_LAST_COMMAND_BUFFER);
         dd_dump_dmesg(f);
         dd_close_file_stream(f);
      }

      /* Terminate the process to prevent future hangs. */
      dd_kill_process();
   }
}
613
/* Drop all gallium-object references held by a call snapshot made with
 * dd_copy_call().  The snapshot struct itself is not freed here. */
static void
dd_unreference_copy_of_call(struct dd_call *dst)
{
   switch (dst->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
      /* index.resource and index.user alias in a union: only unreference
       * when an index buffer resource is actually held. */
      if (dst->info.draw_vbo.draw.index_size &&
          !dst->info.draw_vbo.draw.has_user_indices)
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
      else
         dst->info.draw_vbo.draw.index.user = NULL;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
      pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
      pipe_resource_reference(&dst->info.blit.src.resource, NULL);
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource, NULL);
      break;
   case CALL_CLEAR:
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
      break;
   }
}
658
/* Snapshot a call into "dst", taking references on all gallium objects it
 * mentions.  Pattern throughout: references are adjusted via the *_reference
 * helpers first, then the whole info struct is copied by value — the struct
 * copy writes the same pointer values the reference helpers just installed,
 * so refcounts stay consistent. */
static void
dd_copy_call(struct dd_call *dst, struct dd_call *src)
{
   dst->type = src->type;

   switch (src->type) {
   case CALL_DRAW_VBO:
      pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output,
                               src->info.draw_vbo.draw.count_from_stream_output);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer,
                              src->info.draw_vbo.indirect.buffer);
      pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count,
                              src->info.draw_vbo.indirect.indirect_draw_count);

      /* Release the previously-held index buffer (if any) before taking the
       * new reference; index.resource/index.user are a union. */
      if (dst->info.draw_vbo.draw.index_size &&
          !dst->info.draw_vbo.draw.has_user_indices)
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
      else
         dst->info.draw_vbo.draw.index.user = NULL;

      if (src->info.draw_vbo.draw.index_size &&
          !src->info.draw_vbo.draw.has_user_indices) {
         pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource,
                                 src->info.draw_vbo.draw.index.resource);
      }

      dst->info.draw_vbo = src->info.draw_vbo;
      /* The indirect pointer must point at dst's own embedded copy, not at
       * src's, which the struct copy just installed. */
      if (!src->info.draw_vbo.draw.indirect)
         dst->info.draw_vbo.draw.indirect = NULL;
      else
         dst->info.draw_vbo.draw.indirect = &dst->info.draw_vbo.indirect;
      break;
   case CALL_LAUNCH_GRID:
      pipe_resource_reference(&dst->info.launch_grid.indirect,
                              src->info.launch_grid.indirect);
      dst->info.launch_grid = src->info.launch_grid;
      break;
   case CALL_RESOURCE_COPY_REGION:
      pipe_resource_reference(&dst->info.resource_copy_region.dst,
                              src->info.resource_copy_region.dst);
      pipe_resource_reference(&dst->info.resource_copy_region.src,
                              src->info.resource_copy_region.src);
      dst->info.resource_copy_region = src->info.resource_copy_region;
      break;
   case CALL_BLIT:
      pipe_resource_reference(&dst->info.blit.dst.resource,
                              src->info.blit.dst.resource);
      pipe_resource_reference(&dst->info.blit.src.resource,
                              src->info.blit.src.resource);
      dst->info.blit = src->info.blit;
      break;
   case CALL_FLUSH_RESOURCE:
      pipe_resource_reference(&dst->info.flush_resource,
                              src->info.flush_resource);
      break;
   case CALL_CLEAR:
      dst->info.clear = src->info.clear;
      break;
   case CALL_CLEAR_BUFFER:
      pipe_resource_reference(&dst->info.clear_buffer.res,
                              src->info.clear_buffer.res);
      dst->info.clear_buffer = src->info.clear_buffer;
      break;
   case CALL_CLEAR_TEXTURE:
      break;
   case CALL_CLEAR_RENDER_TARGET:
      break;
   case CALL_CLEAR_DEPTH_STENCIL:
      break;
   case CALL_GENERATE_MIPMAP:
      pipe_resource_reference(&dst->info.generate_mipmap.res,
                              src->info.generate_mipmap.res);
      dst->info.generate_mipmap = src->info.generate_mipmap;
      break;
   }
}
735
/* Prepare a draw-state snapshot for reuse: NULL out all pointers to gallium
 * objects (so the later reference helpers start from a clean slate) and
 * point the CSO/shader/query pointers at the copy's own embedded storage.
 */
static void
dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   unsigned i,j;

   /* Just clear pointers to gallium objects. Don't clear the whole structure,
    * because it would kill performance with its size of 130 KB.
    */
   memset(state->base.vertex_buffers, 0,
          sizeof(state->base.vertex_buffers));
   memset(state->base.so_targets, 0,
          sizeof(state->base.so_targets));
   memset(state->base.constant_buffers, 0,
          sizeof(state->base.constant_buffers));
   memset(state->base.sampler_views, 0,
          sizeof(state->base.sampler_views));
   memset(state->base.shader_images, 0,
          sizeof(state->base.shader_images));
   memset(state->base.shader_buffers, 0,
          sizeof(state->base.shader_buffers));
   memset(&state->base.framebuffer_state, 0,
          sizeof(state->base.framebuffer_state));

   memset(state->shaders, 0, sizeof(state->shaders));

   /* The snapshot stores CSO contents by value; base.* pointers are wired
    * to the embedded copies so dd_copy_draw_state can write through them. */
   state->base.render_cond.query = &state->render_cond;

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      state->base.shaders[i] = &state->shaders[i];
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         state->base.sampler_states[i][j] = &state->sampler_states[i][j];
   }

   state->base.velems = &state->velems;
   state->base.rs = &state->rs;
   state->base.dsa = &state->dsa;
   state->base.blend = &state->blend;
}
774
/* Release everything a draw-state snapshot holds: gallium object references,
 * duplicated shader tokens, and the framebuffer references. */
static void
dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
{
   struct dd_draw_state *dst = &state->base;
   unsigned i,j;

   for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
      pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
   for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
      pipe_so_target_reference(&dst->so_targets[i], NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* Tokens were duplicated with tgsi_dup_tokens in dd_copy_draw_state. */
      if (dst->shaders[i])
         tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);

      for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
         pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
      for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
         pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
      for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
         pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
      for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
         pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
   }

   util_unreference_framebuffer_state(&dst->framebuffer_state);
}
802
803 static void
804 dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
805 {
806 unsigned i,j;
807
808 if (src->render_cond.query) {
809 *dst->render_cond.query = *src->render_cond.query;
810 dst->render_cond.condition = src->render_cond.condition;
811 dst->render_cond.mode = src->render_cond.mode;
812 } else {
813 dst->render_cond.query = NULL;
814 }
815
816 for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
817 pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
818 &src->vertex_buffers[i]);
819 }
820
821 dst->num_so_targets = src->num_so_targets;
822 for (i = 0; i < ARRAY_SIZE(src->so_targets); i++)
823 pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
824 memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
825
826 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
827 if (!src->shaders[i]) {
828 dst->shaders[i] = NULL;
829 continue;
830 }
831
832 if (src->shaders[i]) {
833 dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
834 dst->shaders[i]->state.shader.tokens =
835 tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
836 } else {
837 dst->shaders[i] = NULL;
838 }
839
840 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
841 pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
842 src->constant_buffers[i][j].buffer);
843 memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
844 sizeof(src->constant_buffers[i][j]));
845 }
846
847 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
848 pipe_sampler_view_reference(&dst->sampler_views[i][j],
849 src->sampler_views[i][j]);
850 if (src->sampler_states[i][j])
851 dst->sampler_states[i][j]->state.sampler =
852 src->sampler_states[i][j]->state.sampler;
853 else
854 dst->sampler_states[i][j] = NULL;
855 }
856
857 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
858 pipe_resource_reference(&dst->shader_images[i][j].resource,
859 src->shader_images[i][j].resource);
860 memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
861 sizeof(src->shader_images[i][j]));
862 }
863
864 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
865 pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
866 src->shader_buffers[i][j].buffer);
867 memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
868 sizeof(src->shader_buffers[i][j]));
869 }
870 }
871
872 if (src->velems)
873 dst->velems->state.velems = src->velems->state.velems;
874 else
875 dst->velems = NULL;
876
877 if (src->rs)
878 dst->rs->state.rs = src->rs->state.rs;
879 else
880 dst->rs = NULL;
881
882 if (src->dsa)
883 dst->dsa->state.dsa = src->dsa->state.dsa;
884 else
885 dst->dsa = NULL;
886
887 if (src->blend)
888 dst->blend->state.blend = src->blend->state.blend;
889 else
890 dst->blend = NULL;
891
892 dst->blend_color = src->blend_color;
893 dst->stencil_ref = src->stencil_ref;
894 dst->sample_mask = src->sample_mask;
895 dst->min_samples = src->min_samples;
896 dst->clip_state = src->clip_state;
897 util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
898 memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
899 memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
900 memcpy(dst->tess_default_levels, src->tess_default_levels,
901 sizeof(src->tess_default_levels));
902 dst->apitrace_call_number = src->apitrace_call_number;
903 }
904
/* Free the record at *record and advance *record to the next list node.
 * Caller must hold dctx->mutex when operating on the shared list. */
static void
dd_free_record(struct dd_draw_record **record)
{
   struct dd_draw_record *next = (*record)->next;

   dd_unreference_copy_of_call(&(*record)->call);
   dd_unreference_copy_of_draw_state(&(*record)->draw_state);
   FREE((*record)->driver_state_log);
   FREE(*record);
   *record = next;
}
916
917 static void
918 dd_dump_record(struct dd_context *dctx, struct dd_draw_record *record,
919 uint32_t hw_sequence_no, int64_t now)
920 {
921 FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
922 record->draw_state.base.apitrace_call_number);
923 if (!f)
924 return;
925
926 fprintf(f, "Draw call sequence # = %u\n", record->sequence_no);
927 fprintf(f, "HW reached sequence # = %u\n", hw_sequence_no);
928 fprintf(f, "Elapsed time = %"PRIi64" ms\n\n",
929 (now - record->timestamp) / 1000);
930
931 dd_dump_call(f, &record->draw_state.base, &record->call);
932 fprintf(f, "%s\n", record->driver_state_log);
933
934 dctx->pipe->dump_debug_state(dctx->pipe, f,
935 PIPE_DUMP_DEVICE_STATUS_REGISTERS);
936 dd_dump_dmesg(f);
937 fclose(f);
938 }
939
/* Watchdog thread for DD_DETECT_HANGS_PIPELINED mode.  Polls the GPU-written
 * fence value (*dctx->mapped_fence) every 10 ms, frees records whose fence
 * has signalled, and when a record exceeds the configured timeout, dumps the
 * oldest unsignalled record and kills the process.  Holds dctx->mutex while
 * touching the record list; returns 0 when dctx->kill_thread is set. */
int
dd_thread_pipelined_hang_detect(void *input)
{
   struct dd_context *dctx = (struct dd_context *)input;
   struct dd_screen *dscreen = dd_screen(dctx->base.screen);

   mtx_lock(&dctx->mutex);

   while (!dctx->kill_thread) {
      struct dd_draw_record **record = &dctx->records;

      /* Loop over all records. */
      while (*record) {
         int64_t now;

         /* If the fence has been signalled, release the record and all older
          * records.
          */
         if (*dctx->mapped_fence >= (*record)->sequence_no) {
            while (*record)
               dd_free_record(record);
            break;
         }

         /* The fence hasn't been signalled. Check the timeout. */
         now = os_time_get();
         if (os_time_timeout((*record)->timestamp,
                             (*record)->timestamp + dscreen->timeout_ms * 1000,
                             now)) {
            fprintf(stderr, "GPU hang detected.\n");

            /* Get the oldest unsignalled draw call. */
            while ((*record)->next &&
                   *dctx->mapped_fence < (*record)->next->sequence_no)
               record = &(*record)->next;

            dd_dump_record(dctx, *record, *dctx->mapped_fence, now);
            dd_kill_process();   /* does not return */
         }

         record = &(*record)->next;
      }

      /* Unlock and sleep before starting all over again. */
      mtx_unlock(&dctx->mutex);
      os_time_sleep(10000); /* 10 ms */
      mtx_lock(&dctx->mutex);
   }

   /* Thread termination. */
   while (dctx->records)
      dd_free_record(&dctx->records);

   mtx_unlock(&dctx->mutex);
   return 0;
}
996
/**
 * Capture the driver's current shader state into a heap-allocated string.
 *
 * The caller owns the returned buffer and must free() it.  Returns NULL on
 * allocation/stream failure.  On non-Linux platforms (no fmemopen) an
 * empty string is returned instead.
 *
 * Fix: the malloc() result was dereferenced (buf[0] = 0) without a NULL
 * check — a failed allocation crashed instead of failing gracefully.
 */
static char *
dd_get_driver_shader_log(struct dd_context *dctx)
{
#if defined(PIPE_OS_LINUX)
   FILE *f;
   char *buf;
   int written_bytes;

   if (!dctx->max_log_buffer_size)
      dctx->max_log_buffer_size = 16 * 1024;

   /* Keep increasing the buffer size until there is enough space.
    *
    * open_memstream can resize automatically, but it's VERY SLOW.
    * fmemopen is much faster.
    */
   while (1) {
      buf = malloc(dctx->max_log_buffer_size);
      if (!buf)
         return NULL;
      buf[0] = 0;

      f = fmemopen(buf, dctx->max_log_buffer_size, "a");
      if (!f) {
         free(buf);
         return NULL;
      }

      dd_dump_driver_state(dctx, f, PIPE_DUMP_CURRENT_SHADERS);
      written_bytes = ftell(f);
      fclose(f);

      /* Return if the backing buffer is large enough. */
      if (written_bytes < dctx->max_log_buffer_size - 1)
         break;

      /* Try again with a bigger buffer. */
      free(buf);
      dctx->max_log_buffer_size *= 2;
   }

   return buf;
#else
   /* Return an empty string. */
   return (char*)calloc(1, 4);
#endif
}
1042
1043 static void
1044 dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
1045 {
1046 struct pipe_context *pipe = dctx->pipe;
1047 struct dd_draw_record *record;
1048 char *log;
1049
1050 /* Make a record of the draw call. */
1051 record = MALLOC_STRUCT(dd_draw_record);
1052 if (!record)
1053 return;
1054
1055 /* Create the log. */
1056 log = dd_get_driver_shader_log(dctx);
1057 if (!log) {
1058 FREE(record);
1059 return;
1060 }
1061
1062 /* Update the fence with the GPU.
1063 *
1064 * radeonsi/clear_buffer waits in the command processor until shaders are
1065 * idle before writing to memory. That's a necessary condition for isolating
1066 * draw calls.
1067 */
1068 dctx->sequence_no++;
1069 pipe->clear_buffer(pipe, dctx->fence, 0, 4, &dctx->sequence_no, 4);
1070
1071 /* Initialize the record. */
1072 record->timestamp = os_time_get();
1073 record->sequence_no = dctx->sequence_no;
1074 record->driver_state_log = log;
1075
1076 memset(&record->call, 0, sizeof(record->call));
1077 dd_copy_call(&record->call, call);
1078
1079 dd_init_copy_of_draw_state(&record->draw_state);
1080 dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
1081
1082 /* Add the record to the list. */
1083 mtx_lock(&dctx->mutex);
1084 record->next = dctx->records;
1085 dctx->records = record;
1086 mtx_unlock(&dctx->mutex);
1087 }
1088
1089 static void
1090 dd_context_flush(struct pipe_context *_pipe,
1091 struct pipe_fence_handle **fence, unsigned flags)
1092 {
1093 struct dd_context *dctx = dd_context(_pipe);
1094 struct pipe_context *pipe = dctx->pipe;
1095
1096 switch (dd_screen(dctx->base.screen)->mode) {
1097 case DD_DETECT_HANGS:
1098 dd_flush_and_handle_hang(dctx, fence, flags,
1099 "GPU hang detected in pipe->flush()");
1100 break;
1101 case DD_DETECT_HANGS_PIPELINED: /* nothing to do here */
1102 case DD_DUMP_ALL_CALLS:
1103 case DD_DUMP_APITRACE_CALL:
1104 pipe->flush(pipe, fence, flags);
1105 break;
1106 default:
1107 assert(0);
1108 }
1109 }
1110
1111 static void
1112 dd_before_draw(struct dd_context *dctx)
1113 {
1114 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1115
1116 if (dscreen->mode == DD_DETECT_HANGS &&
1117 !dscreen->no_flush &&
1118 dctx->num_draw_calls >= dscreen->skip_count)
1119 dd_flush_and_handle_hang(dctx, NULL, 0,
1120 "GPU hang most likely caused by internal "
1121 "driver commands");
1122 }
1123
1124 static void
1125 dd_after_draw(struct dd_context *dctx, struct dd_call *call)
1126 {
1127 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1128 struct pipe_context *pipe = dctx->pipe;
1129
1130 if (dctx->num_draw_calls >= dscreen->skip_count) {
1131 switch (dscreen->mode) {
1132 case DD_DETECT_HANGS:
1133 if (!dscreen->no_flush &&
1134 dd_flush_and_check_hang(dctx, NULL, 0)) {
1135 dd_write_report(dctx, call,
1136 PIPE_DUMP_DEVICE_STATUS_REGISTERS |
1137 PIPE_DUMP_CURRENT_STATES |
1138 PIPE_DUMP_CURRENT_SHADERS |
1139 PIPE_DUMP_LAST_COMMAND_BUFFER,
1140 true);
1141
1142 /* Terminate the process to prevent future hangs. */
1143 dd_kill_process();
1144 }
1145 break;
1146 case DD_DETECT_HANGS_PIPELINED:
1147 dd_pipelined_process_draw(dctx, call);
1148 break;
1149 case DD_DUMP_ALL_CALLS:
1150 if (!dscreen->no_flush)
1151 pipe->flush(pipe, NULL, 0);
1152 dd_write_report(dctx, call,
1153 PIPE_DUMP_CURRENT_STATES |
1154 PIPE_DUMP_CURRENT_SHADERS |
1155 PIPE_DUMP_LAST_COMMAND_BUFFER,
1156 false);
1157 break;
1158 case DD_DUMP_APITRACE_CALL:
1159 if (dscreen->apitrace_dump_call ==
1160 dctx->draw_state.apitrace_call_number) {
1161 dd_write_report(dctx, call,
1162 PIPE_DUMP_CURRENT_STATES |
1163 PIPE_DUMP_CURRENT_SHADERS,
1164 false);
1165 /* No need to continue. */
1166 exit(0);
1167 }
1168 break;
1169 default:
1170 assert(0);
1171 }
1172 }
1173
1174 ++dctx->num_draw_calls;
1175 if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
1176 fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
1177 dctx->num_draw_calls);
1178 }
1179
1180 static void
1181 dd_context_draw_vbo(struct pipe_context *_pipe,
1182 const struct pipe_draw_info *info)
1183 {
1184 struct dd_context *dctx = dd_context(_pipe);
1185 struct pipe_context *pipe = dctx->pipe;
1186 struct dd_call call;
1187
1188 call.type = CALL_DRAW_VBO;
1189 call.info.draw_vbo.draw = *info;
1190 if (info->indirect) {
1191 call.info.draw_vbo.indirect = *info->indirect;
1192 call.info.draw_vbo.draw.indirect = &call.info.draw_vbo.indirect;
1193 } else {
1194 memset(&call.info.draw_vbo.indirect, 0, sizeof(*info->indirect));
1195 }
1196
1197 dd_before_draw(dctx);
1198 pipe->draw_vbo(pipe, info);
1199 dd_after_draw(dctx, &call);
1200 }
1201
1202 static void
1203 dd_context_launch_grid(struct pipe_context *_pipe,
1204 const struct pipe_grid_info *info)
1205 {
1206 struct dd_context *dctx = dd_context(_pipe);
1207 struct pipe_context *pipe = dctx->pipe;
1208 struct dd_call call;
1209
1210 call.type = CALL_LAUNCH_GRID;
1211 call.info.launch_grid = *info;
1212
1213 dd_before_draw(dctx);
1214 pipe->launch_grid(pipe, info);
1215 dd_after_draw(dctx, &call);
1216 }
1217
1218 static void
1219 dd_context_resource_copy_region(struct pipe_context *_pipe,
1220 struct pipe_resource *dst, unsigned dst_level,
1221 unsigned dstx, unsigned dsty, unsigned dstz,
1222 struct pipe_resource *src, unsigned src_level,
1223 const struct pipe_box *src_box)
1224 {
1225 struct dd_context *dctx = dd_context(_pipe);
1226 struct pipe_context *pipe = dctx->pipe;
1227 struct dd_call call;
1228
1229 call.type = CALL_RESOURCE_COPY_REGION;
1230 call.info.resource_copy_region.dst = dst;
1231 call.info.resource_copy_region.dst_level = dst_level;
1232 call.info.resource_copy_region.dstx = dstx;
1233 call.info.resource_copy_region.dsty = dsty;
1234 call.info.resource_copy_region.dstz = dstz;
1235 call.info.resource_copy_region.src = src;
1236 call.info.resource_copy_region.src_level = src_level;
1237 call.info.resource_copy_region.src_box = *src_box;
1238
1239 dd_before_draw(dctx);
1240 pipe->resource_copy_region(pipe,
1241 dst, dst_level, dstx, dsty, dstz,
1242 src, src_level, src_box);
1243 dd_after_draw(dctx, &call);
1244 }
1245
1246 static void
1247 dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1248 {
1249 struct dd_context *dctx = dd_context(_pipe);
1250 struct pipe_context *pipe = dctx->pipe;
1251 struct dd_call call;
1252
1253 call.type = CALL_BLIT;
1254 call.info.blit = *info;
1255
1256 dd_before_draw(dctx);
1257 pipe->blit(pipe, info);
1258 dd_after_draw(dctx, &call);
1259 }
1260
1261 static boolean
1262 dd_context_generate_mipmap(struct pipe_context *_pipe,
1263 struct pipe_resource *res,
1264 enum pipe_format format,
1265 unsigned base_level,
1266 unsigned last_level,
1267 unsigned first_layer,
1268 unsigned last_layer)
1269 {
1270 struct dd_context *dctx = dd_context(_pipe);
1271 struct pipe_context *pipe = dctx->pipe;
1272 struct dd_call call;
1273 boolean result;
1274
1275 call.type = CALL_GENERATE_MIPMAP;
1276 call.info.generate_mipmap.res = res;
1277 call.info.generate_mipmap.format = format;
1278 call.info.generate_mipmap.base_level = base_level;
1279 call.info.generate_mipmap.last_level = last_level;
1280 call.info.generate_mipmap.first_layer = first_layer;
1281 call.info.generate_mipmap.last_layer = last_layer;
1282
1283 dd_before_draw(dctx);
1284 result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1285 first_layer, last_layer);
1286 dd_after_draw(dctx, &call);
1287 return result;
1288 }
1289
1290 static void
1291 dd_context_flush_resource(struct pipe_context *_pipe,
1292 struct pipe_resource *resource)
1293 {
1294 struct dd_context *dctx = dd_context(_pipe);
1295 struct pipe_context *pipe = dctx->pipe;
1296 struct dd_call call;
1297
1298 call.type = CALL_FLUSH_RESOURCE;
1299 call.info.flush_resource = resource;
1300
1301 dd_before_draw(dctx);
1302 pipe->flush_resource(pipe, resource);
1303 dd_after_draw(dctx, &call);
1304 }
1305
1306 static void
1307 dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
1308 const union pipe_color_union *color, double depth,
1309 unsigned stencil)
1310 {
1311 struct dd_context *dctx = dd_context(_pipe);
1312 struct pipe_context *pipe = dctx->pipe;
1313 struct dd_call call;
1314
1315 call.type = CALL_CLEAR;
1316 call.info.clear.buffers = buffers;
1317 call.info.clear.color = *color;
1318 call.info.clear.depth = depth;
1319 call.info.clear.stencil = stencil;
1320
1321 dd_before_draw(dctx);
1322 pipe->clear(pipe, buffers, color, depth, stencil);
1323 dd_after_draw(dctx, &call);
1324 }
1325
1326 static void
1327 dd_context_clear_render_target(struct pipe_context *_pipe,
1328 struct pipe_surface *dst,
1329 const union pipe_color_union *color,
1330 unsigned dstx, unsigned dsty,
1331 unsigned width, unsigned height,
1332 bool render_condition_enabled)
1333 {
1334 struct dd_context *dctx = dd_context(_pipe);
1335 struct pipe_context *pipe = dctx->pipe;
1336 struct dd_call call;
1337
1338 call.type = CALL_CLEAR_RENDER_TARGET;
1339
1340 dd_before_draw(dctx);
1341 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
1342 render_condition_enabled);
1343 dd_after_draw(dctx, &call);
1344 }
1345
1346 static void
1347 dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1348 struct pipe_surface *dst, unsigned clear_flags,
1349 double depth, unsigned stencil, unsigned dstx,
1350 unsigned dsty, unsigned width, unsigned height,
1351 bool render_condition_enabled)
1352 {
1353 struct dd_context *dctx = dd_context(_pipe);
1354 struct pipe_context *pipe = dctx->pipe;
1355 struct dd_call call;
1356
1357 call.type = CALL_CLEAR_DEPTH_STENCIL;
1358
1359 dd_before_draw(dctx);
1360 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1361 dstx, dsty, width, height,
1362 render_condition_enabled);
1363 dd_after_draw(dctx, &call);
1364 }
1365
1366 static void
1367 dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1368 unsigned offset, unsigned size,
1369 const void *clear_value, int clear_value_size)
1370 {
1371 struct dd_context *dctx = dd_context(_pipe);
1372 struct pipe_context *pipe = dctx->pipe;
1373 struct dd_call call;
1374
1375 call.type = CALL_CLEAR_BUFFER;
1376 call.info.clear_buffer.res = res;
1377 call.info.clear_buffer.offset = offset;
1378 call.info.clear_buffer.size = size;
1379 call.info.clear_buffer.clear_value = clear_value;
1380 call.info.clear_buffer.clear_value_size = clear_value_size;
1381
1382 dd_before_draw(dctx);
1383 pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1384 dd_after_draw(dctx, &call);
1385 }
1386
1387 static void
1388 dd_context_clear_texture(struct pipe_context *_pipe,
1389 struct pipe_resource *res,
1390 unsigned level,
1391 const struct pipe_box *box,
1392 const void *data)
1393 {
1394 struct dd_context *dctx = dd_context(_pipe);
1395 struct pipe_context *pipe = dctx->pipe;
1396 struct dd_call call;
1397
1398 call.type = CALL_CLEAR_TEXTURE;
1399
1400 dd_before_draw(dctx);
1401 pipe->clear_texture(pipe, res, level, box, data);
1402 dd_after_draw(dctx, &call);
1403 }
1404
1405 void
1406 dd_init_draw_functions(struct dd_context *dctx)
1407 {
1408 CTX_INIT(flush);
1409 CTX_INIT(draw_vbo);
1410 CTX_INIT(launch_grid);
1411 CTX_INIT(resource_copy_region);
1412 CTX_INIT(blit);
1413 CTX_INIT(clear);
1414 CTX_INIT(clear_render_target);
1415 CTX_INIT(clear_depth_stencil);
1416 CTX_INIT(clear_buffer);
1417 CTX_INIT(clear_texture);
1418 CTX_INIT(flush_resource);
1419 CTX_INIT(generate_mipmap);
1420 }