4c8167a9e7dbcf8f382c43279d47342cafcd747a
[mesa.git] / src / gallium / drivers / llvmpipe / lp_setup.c
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * Tiling engine.
 *
 * Builds per-tile display lists and executes them on calls to
 * lp_setup_flush().
 */

#include <limits.h>

#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "draw/draw_pipe.h"
#include "os/os_time.h"
#include "lp_context.h"
#include "lp_memory.h"
#include "lp_scene.h"
#include "lp_texture.h"
#include "lp_debug.h"
#include "lp_fence.h"
#include "lp_query.h"
#include "lp_rast.h"
#include "lp_setup_context.h"
#include "lp_screen.h"
#include "lp_state.h"
#include "state_tracker/sw_winsys.h"

#include "draw/draw_context.h"
#include "draw/draw_vbuf.h"


static boolean set_scene_state( struct lp_setup_context *, enum setup_state,
                                const char *reason);
static boolean try_update_scene_state( struct lp_setup_context *setup );


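/**
 * Grab the next scene from the ring of pre-allocated scenes.  If the
 * rasterizer is still working on it (the scene's fence is unsignalled),
 * wait for it to finish, then start binning into the now-empty scene.
 */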
static void
lp_setup_get_empty_scene(struct lp_setup_context *setup)
{
   assert(setup->scene == NULL);

   setup->scene_idx++;
   setup->scene_idx %= Elements(setup->scenes);

   setup->scene = setup->scenes[setup->scene_idx];

   if (setup->scene->fence) {
      if (LP_DEBUG & DEBUG_SETUP)
         debug_printf("%s: wait for scene %d\n",
                      __FUNCTION__, setup->scene->fence->id);

      lp_fence_wait(setup->scene->fence);
   }

   lp_scene_begin_binning(setup->scene, &setup->fb, setup->rasterizer_discard);

}


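/*
 * Initial values of setup->triangle/line/point.  The first primitive drawn
 * after a state change picks the specialized rasterization function via the
 * lp_setup_choose_*() helpers and then forwards the call to it.
 */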
static void
first_triangle( struct lp_setup_context *setup,
                const float (*v0)[4],
                const float (*v1)[4],
                const float (*v2)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_triangle( setup );
   setup->triangle( setup, v0, v1, v2 );
}

static void
first_line( struct lp_setup_context *setup,
            const float (*v0)[4],
            const float (*v1)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_line( setup );
   setup->line( setup, v0, v1 );
}

static void
first_point( struct lp_setup_context *setup,
             const float (*v0)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_point( setup );
   setup->point( setup, v0 );
}

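/**
 * Reset the setup module's per-scene derived state after a scene has been
 * handed off: forget the stored constants/fs state, drop the current scene
 * pointer and reinstall the first_* primitive hooks.
 */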
void lp_setup_reset( struct lp_setup_context *setup )
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   /* Reset derived state */
   for (i = 0; i < Elements(setup->constants); ++i) {
      setup->constants[i].stored_size = 0;
      setup->constants[i].stored_data = NULL;
   }
   setup->fs.stored = NULL;
   setup->dirty = ~0;

   /* no current bin */
   setup->scene = NULL;

   /* Reset some state:
    */
   memset(&setup->clear, 0, sizeof setup->clear);

   /* Have an explicit "start-binning" call and get rid of this
    * pointer twiddling?
    */
   setup->line = first_line;
   setup->point = first_point;
   setup->triangle = first_triangle;
}


/** Rasterize all scene's bins */
static void
lp_setup_rasterize_scene( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);

   scene->num_active_queries = setup->active_binned_queries;
   memcpy(scene->active_queries, setup->active_queries,
          scene->num_active_queries * sizeof(scene->active_queries[0]));

   lp_scene_end_binning(scene);

   lp_fence_reference(&setup->last_fence, scene->fence);

   if (setup->last_fence)
      setup->last_fence->issued = TRUE;

   pipe_mutex_lock(screen->rast_mutex);

   /* FIXME: We enqueue the scene and then wait for the rasterizer to finish.
    * This means vertex processing never actually runs in parallel with
    * rasterization (at least not within the same context), which is what
    * having multiple scenes per setup is meant to enable - by the time we
    * grab a new empty scene, any old one is already empty again because we
    * waited here for the raster tasks to finish.  Ideally we wouldn't wait
    * here at all and would rely on fences elsewhere when waiting is actually
    * necessary.  Certainly, lp_scene_end_rasterization() would need to be
    * deferred too, and there are probably other reasons why this doesn't
    * work yet.
    */
   lp_rast_queue_scene(screen->rast, scene);
   lp_rast_finish(screen->rast);
   pipe_mutex_unlock(screen->rast_mutex);

   lp_scene_end_rasterization(setup->scene);
   lp_setup_reset( setup );

   LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
}


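/**
 * Start binning into the current scene: create the scene's fence, store the
 * current state in the scene, and bin any full-framebuffer clears that were
 * queued up while in the CLEARED state.
 */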
static boolean
begin_binning( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   boolean need_zsload = FALSE;
   boolean ok;

   assert(scene);
   assert(scene->fence == NULL);

   /* Always create a fence:
    */
   scene->fence = lp_fence_create(MAX2(1, setup->num_threads));
   if (!scene->fence)
      return FALSE;

   ok = try_update_scene_state(setup);
   if (!ok)
      return FALSE;

   if (setup->fb.zsbuf &&
       ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(setup->fb.zsbuf->format))
      need_zsload = TRUE;

   LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__,
          setup->clear.flags >> 2,
          need_zsload ? "load" : "clear");

   if (setup->clear.flags & PIPE_CLEAR_COLOR) {
      unsigned cbuf;
      for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) {
         assert(PIPE_CLEAR_COLOR0 == 1 << 2);
         if (setup->clear.flags & (1 << (2 + cbuf))) {
            union lp_rast_cmd_arg clearrb_arg;
            struct lp_rast_clear_rb *cc_scene =
               (struct lp_rast_clear_rb *)
                  lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb));

            if (!cc_scene) {
               return FALSE;
            }

            cc_scene->cbuf = cbuf;
            cc_scene->color_val = setup->clear.color_val[cbuf];
            clearrb_arg.clear_rb = cc_scene;

            if (!lp_scene_bin_everywhere(scene,
                                         LP_RAST_OP_CLEAR_COLOR,
                                         clearrb_arg))
               return FALSE;
         }
      }
   }

   if (setup->fb.zsbuf) {
      if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) {
         ok = lp_scene_bin_everywhere( scene,
                                       LP_RAST_OP_CLEAR_ZSTENCIL,
                                       lp_rast_arg_clearzs(
                                          setup->clear.zsvalue,
                                          setup->clear.zsmask));
         if (!ok)
            return FALSE;
      }
   }

   setup->clear.flags = 0;
   setup->clear.zsmask = 0;
   setup->clear.zsvalue = 0;

   scene->had_queries = !!setup->active_binned_queries;

   LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
   return TRUE;
}


/* This basically bins and then flushes any outstanding full-screen
 * clears.
 *
 * TODO: fast path for fullscreen clears and no triangles.
 */
static boolean
execute_clears( struct lp_setup_context *setup )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   return begin_binning( setup );
}

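/* Names for enum setup_state, indexed by state value; used only for the
 * debug output in set_scene_state() below.
 */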
const char *states[] = {
   "FLUSHED",
   "CLEARED",
   "ACTIVE "
};


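/**
 * Move the setup module to a new state (FLUSHED, CLEARED or ACTIVE).
 * Entering ACTIVE starts binning into the current scene; entering FLUSHED
 * executes any pending clears and hands the scene to the rasterizer.  On
 * failure the scene is dropped and the module returns to FLUSHED.
 */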
static boolean
set_scene_state( struct lp_setup_context *setup,
                 enum setup_state new_state,
                 const char *reason)
{
   unsigned old_state = setup->state;

   if (old_state == new_state)
      return TRUE;

   if (LP_DEBUG & DEBUG_SCENE) {
      debug_printf("%s old %s new %s%s%s\n",
                   __FUNCTION__,
                   states[old_state],
                   states[new_state],
                   (new_state == SETUP_FLUSHED) ? ": " : "",
                   (new_state == SETUP_FLUSHED) ? reason : "");

      if (new_state == SETUP_FLUSHED && setup->scene)
         lp_debug_draw_bins_by_cmd_length(setup->scene);
   }

   /* wait for a free/empty scene
    */
   if (old_state == SETUP_FLUSHED)
      lp_setup_get_empty_scene(setup);

   switch (new_state) {
   case SETUP_CLEARED:
      break;

   case SETUP_ACTIVE:
      if (!begin_binning( setup ))
         goto fail;
      break;

   case SETUP_FLUSHED:
      if (old_state == SETUP_CLEARED)
         if (!execute_clears( setup ))
            goto fail;

      lp_setup_rasterize_scene( setup );
      assert(setup->scene == NULL);
      break;

   default:
      assert(0 && "invalid setup state mode");
      goto fail;
   }

   setup->state = new_state;
   return TRUE;

fail:
   if (setup->scene) {
      lp_scene_end_rasterization(setup->scene);
      setup->scene = NULL;
   }

   setup->state = SETUP_FLUSHED;
   lp_setup_reset( setup );
   return FALSE;
}


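/**
 * Flush the current scene to the rasterizer and, if requested, return a
 * fence which signals once rasterization of the flushed work has completed.
 */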
void
lp_setup_flush( struct lp_setup_context *setup,
                struct pipe_fence_handle **fence,
                const char *reason)
{
   set_scene_state( setup, SETUP_FLUSHED, reason );

   if (fence) {
      lp_fence_reference((struct lp_fence **)fence, setup->last_fence);
   }
}


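/**
 * Bind a new framebuffer state.  Any scene built against the old framebuffer
 * is flushed first; the new state is only picked up when the next scene is
 * started.
 */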
void
lp_setup_bind_framebuffer( struct lp_setup_context *setup,
                           const struct pipe_framebuffer_state *fb )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   /* Flush any old scene.
    */
   set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );

   /*
    * Ensure the old scene is not reused.
    */
   assert(!setup->scene);

   /* Set new state.  This will be picked up later when we next need a
    * scene.
    */
   util_copy_framebuffer_state(&setup->fb, fb);
   setup->framebuffer.x0 = 0;
   setup->framebuffer.y0 = 0;
   setup->framebuffer.x1 = fb->width-1;
   setup->framebuffer.y1 = fb->height-1;
   setup->dirty |= LP_SETUP_NEW_SCISSOR;
}


/*
 * Try to clear one color buffer of the attached fb, either by binning a clear
 * command or queuing up the clear for later (when binning is started).
 */
static boolean
lp_setup_try_clear_color_buffer(struct lp_setup_context *setup,
                                const union pipe_color_union *color,
                                unsigned cbuf)
{
   union lp_rast_cmd_arg clearrb_arg;
   union util_color uc;
   enum pipe_format format = setup->fb.cbufs[cbuf]->format;

   LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);

   if (util_format_is_pure_integer(format)) {
      /*
       * We expect int/uint clear values here, though some APIs
       * might disagree (but in any case util_pack_color()
       * couldn't handle it)...
       */
      if (util_format_is_pure_sint(format)) {
         util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1);
      }
      else {
         assert(util_format_is_pure_uint(format));
         util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
      }
   }
   else {
      util_pack_color(color->f, format, &uc);
   }

   if (setup->state == SETUP_ACTIVE) {
      struct lp_scene *scene = setup->scene;

      /* Add the clear to existing scene.  In the unusual case where
       * both color and depth-stencil are being cleared when there's
       * already been some rendering, we could discard the currently
       * binned scene and start again, but I don't see that as being
       * a common usage.
       */
      struct lp_rast_clear_rb *cc_scene =
         (struct lp_rast_clear_rb *)
            lp_scene_alloc_aligned(scene, sizeof(struct lp_rast_clear_rb), 8);

      if (!cc_scene) {
         return FALSE;
      }

      cc_scene->cbuf = cbuf;
      cc_scene->color_val = uc;
      clearrb_arg.clear_rb = cc_scene;

      if (!lp_scene_bin_everywhere(scene,
                                   LP_RAST_OP_CLEAR_COLOR,
                                   clearrb_arg))
         return FALSE;
   }
   else {
      /* Put ourselves into the 'pre-clear' state, specifically to try
       * and accumulate multiple clears to color and depth_stencil
       * buffers which the app or state-tracker might issue
       * separately.
       */
      set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );

      assert(PIPE_CLEAR_COLOR0 == (1 << 2));
      setup->clear.flags |= 1 << (cbuf + 2);
      setup->clear.color_val[cbuf] = uc;
   }

   return TRUE;
}

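/*
 * Depth/stencil counterpart of the color path above: pack the clear value
 * and a write mask for the zsbuf format, then either bin the clear into the
 * active scene or accumulate it for later in the 'pre-clear' state.
 */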
static boolean
lp_setup_try_clear_zs(struct lp_setup_context *setup,
                      double depth,
                      unsigned stencil,
                      unsigned flags)
{
   uint64_t zsmask = 0;
   uint64_t zsvalue = 0;
   uint32_t zmask32;
   uint8_t smask8;

   LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);

   zmask32 = (flags & PIPE_CLEAR_DEPTH) ? ~0 : 0;
   smask8 = (flags & PIPE_CLEAR_STENCIL) ? ~0 : 0;

   zsvalue = util_pack64_z_stencil(setup->fb.zsbuf->format,
                                   depth,
                                   stencil);

   zsmask = util_pack64_mask_z_stencil(setup->fb.zsbuf->format,
                                       zmask32,
                                       smask8);

   zsvalue &= zsmask;

   if (setup->state == SETUP_ACTIVE) {
      struct lp_scene *scene = setup->scene;

      /* Add the clear to existing scene.  In the unusual case where
       * both color and depth-stencil are being cleared when there's
       * already been some rendering, we could discard the currently
       * binned scene and start again, but I don't see that as being
       * a common usage.
       */
      if (!lp_scene_bin_everywhere(scene,
                                   LP_RAST_OP_CLEAR_ZSTENCIL,
                                   lp_rast_arg_clearzs(zsvalue, zsmask)))
         return FALSE;
   }
   else {
      /* Put ourselves into the 'pre-clear' state, specifically to try
       * and accumulate multiple clears to color and depth_stencil
       * buffers which the app or state-tracker might issue
       * separately.
       */
      set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );

      setup->clear.flags |= flags;

      setup->clear.zsmask |= zsmask;
      setup->clear.zsvalue =
         (setup->clear.zsvalue & ~zsmask) | (zsvalue & zsmask);
   }

   return TRUE;
}

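/**
 * Handle a clear request from the pipe driver: clear the depth/stencil
 * buffer and/or the requested color buffers, retrying after a flush if a
 * per-buffer clear command could not be binned.
 */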
void
lp_setup_clear( struct lp_setup_context *setup,
                const union pipe_color_union *color,
                double depth,
                unsigned stencil,
                unsigned flags )
{
   unsigned i;

   /*
    * Note that any of these (at most 9) clears could fail, though at most
    * one should actually fail since the retry after a flush ought to
    * succeed.  Clearing per buffer avoids redoing clears which already
    * succeeded (we may still clear tiles twice if a clear command succeeded
    * only partially for one buffer).
    */
   if (flags & PIPE_CLEAR_DEPTHSTENCIL) {
      unsigned flagszs = flags & PIPE_CLEAR_DEPTHSTENCIL;
      if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) {
         lp_setup_flush(setup, NULL, __FUNCTION__);

         if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs))
            assert(0);
      }
   }

   if (flags & PIPE_CLEAR_COLOR) {
      assert(PIPE_CLEAR_COLOR0 == (1 << 2));
      for (i = 0; i < setup->fb.nr_cbufs; i++) {
         if ((flags & (1 << (2 + i))) && setup->fb.cbufs[i]) {
            if (!lp_setup_try_clear_color_buffer(setup, color, i)) {
               lp_setup_flush(setup, NULL, __FUNCTION__);

               if (!lp_setup_try_clear_color_buffer(setup, color, i))
                  assert(0);
            }
         }
      }
   }
}


void
lp_setup_set_triangle_state( struct lp_setup_context *setup,
                             unsigned cull_mode,
                             boolean ccw_is_frontface,
                             boolean scissor,
                             boolean half_pixel_center,
                             boolean bottom_edge_rule)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->ccw_is_frontface = ccw_is_frontface;
   setup->cullmode = cull_mode;
   setup->triangle = first_triangle;
   setup->pixel_offset = half_pixel_center ? 0.5f : 0.0f;
   setup->bottom_edge_rule = bottom_edge_rule;

   if (setup->scissor_test != scissor) {
      setup->dirty |= LP_SETUP_NEW_SCISSOR;
      setup->scissor_test = scissor;
   }
}

void
lp_setup_set_line_state( struct lp_setup_context *setup,
                         float line_width)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->line_width = line_width;
}

void
lp_setup_set_point_state( struct lp_setup_context *setup,
                          float point_size,
                          boolean point_size_per_vertex,
                          uint sprite_coord_enable,
                          uint sprite_coord_origin)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->point_size = point_size;
   setup->sprite_coord_enable = sprite_coord_enable;
   setup->sprite_coord_origin = sprite_coord_origin;
   setup->point_size_per_vertex = point_size_per_vertex;
}

void
lp_setup_set_setup_variant( struct lp_setup_context *setup,
                            const struct lp_setup_variant *variant)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->setup.variant = variant;
}

void
lp_setup_set_fs_variant( struct lp_setup_context *setup,
                         struct lp_fragment_shader_variant *variant)
{
   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__,
          variant);
   /* FIXME: reference count */

   setup->fs.current.variant = variant;
   setup->dirty |= LP_SETUP_NEW_FS;
}

void
lp_setup_set_fs_constants(struct lp_setup_context *setup,
                          unsigned num,
                          struct pipe_constant_buffer *buffers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);

   assert(num <= Elements(setup->constants));

   for (i = 0; i < num; ++i) {
      util_copy_constant_buffer(&setup->constants[i].current, &buffers[i]);
   }
   for (; i < Elements(setup->constants); i++) {
      util_copy_constant_buffer(&setup->constants[i].current, NULL);
   }
   setup->dirty |= LP_SETUP_NEW_CONSTANTS;
}


void
lp_setup_set_alpha_ref_value( struct lp_setup_context *setup,
                              float alpha_ref_value )
{
   LP_DBG(DEBUG_SETUP, "%s %f\n", __FUNCTION__, alpha_ref_value);

   if(setup->fs.current.jit_context.alpha_ref_value != alpha_ref_value) {
      setup->fs.current.jit_context.alpha_ref_value = alpha_ref_value;
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

void
lp_setup_set_stencil_ref_values( struct lp_setup_context *setup,
                                 const ubyte refs[2] )
{
   LP_DBG(DEBUG_SETUP, "%s %d %d\n", __FUNCTION__, refs[0], refs[1]);

   if (setup->fs.current.jit_context.stencil_ref_front != refs[0] ||
       setup->fs.current.jit_context.stencil_ref_back != refs[1]) {
      setup->fs.current.jit_context.stencil_ref_front = refs[0];
      setup->fs.current.jit_context.stencil_ref_back = refs[1];
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

void
lp_setup_set_blend_color( struct lp_setup_context *setup,
                          const struct pipe_blend_color *blend_color )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(blend_color);

   if(memcmp(&setup->blend_color.current, blend_color, sizeof *blend_color) != 0) {
      memcpy(&setup->blend_color.current, blend_color, sizeof *blend_color);
      setup->dirty |= LP_SETUP_NEW_BLEND_COLOR;
   }
}


void
lp_setup_set_scissors( struct lp_setup_context *setup,
                       const struct pipe_scissor_state *scissors )
{
   unsigned i;
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(scissors);

   for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
      setup->scissors[i].x0 = scissors[i].minx;
      setup->scissors[i].x1 = scissors[i].maxx-1;
      setup->scissors[i].y0 = scissors[i].miny;
      setup->scissors[i].y1 = scissors[i].maxy-1;
   }
   setup->dirty |= LP_SETUP_NEW_SCISSOR;
}


void
lp_setup_set_flatshade_first( struct lp_setup_context *setup,
                              boolean flatshade_first )
{
   setup->flatshade_first = flatshade_first;
}

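/*
 * Note: the rasterizer discard flag is baked into the scene when binning
 * starts (see the lp_scene_begin_binning() call in lp_setup_get_empty_scene()),
 * so changing it forces a flush of the current scene.
 */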
void
lp_setup_set_rasterizer_discard( struct lp_setup_context *setup,
                                 boolean rasterizer_discard )
{
   if (setup->rasterizer_discard != rasterizer_discard) {
      setup->rasterizer_discard = rasterizer_discard;
      set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );
   }
}

void
lp_setup_set_vertex_info( struct lp_setup_context *setup,
                          struct vertex_info *vertex_info )
{
   /* XXX: just silently holding onto the pointer:
    */
   setup->vertex_info = vertex_info;
}


/**
 * Called during state validation when LP_NEW_VIEWPORT is set.
 */
void
lp_setup_set_viewports(struct lp_setup_context *setup,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewports)
{
   struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num_viewports <= PIPE_MAX_VIEWPORTS);
   assert(viewports);

   /*
    * For use in lp_state_fs.c, propagate the viewport values for all viewports.
    */
   for (i = 0; i < num_viewports; i++) {
      float min_depth;
      float max_depth;

      if (lp->rasterizer->clip_halfz == 0) {
         float half_depth = viewports[i].scale[2];
         min_depth = viewports[i].translate[2] - half_depth;
         max_depth = min_depth + half_depth * 2.0f;
      } else {
         min_depth = viewports[i].translate[2];
         max_depth = min_depth + viewports[i].scale[2];
      }

      if (setup->viewports[i].min_depth != min_depth ||
          setup->viewports[i].max_depth != max_depth) {
         setup->viewports[i].min_depth = min_depth;
         setup->viewports[i].max_depth = max_depth;
         setup->dirty |= LP_SETUP_NEW_VIEWPORTS;
      }
   }
}


/**
 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
 */
void
lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_view **views)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &setup->fs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&setup->fs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - setup array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
               jit_tex->base = lp_tex->data;
            }

            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;

               if (llvmpipe_resource_is_texture(res)) {
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }

                  if (view->target == PIPE_TEXTURE_1D_ARRAY ||
                      view->target == PIPE_TEXTURE_2D_ARRAY ||
                      view->target == PIPE_TEXTURE_CUBE ||
                      view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures there is no first_layer field in
                      * the jit texture; instead adjust the layer count
                      * (stored as depth) and fold first_layer into the mip
                      * level offsets (since the layout is mip-first, we
                      * can't simply adjust the base pointer).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have first_element, instead adjust
                   * last_element (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.last_element - view->u.buf.first_element + 1;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.first_element *
                                  view_blocksize;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.first_element <= view->u.buf.last_element);
                  assert(view->u.buf.last_element * view_blocksize < res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                      PIPE_TRANSFER_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            assert(jit_tex->base);
         }
      }
   }

   setup->dirty |= LP_SETUP_NEW_FS;
}


/**
 * Called during state validation when LP_NEW_SAMPLER is set.
 */
void
lp_setup_set_fragment_sampler_state(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_state **samplers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SAMPLERS);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;

      if (sampler) {
         struct lp_jit_sampler *jit_sam;
         jit_sam = &setup->fs.current.jit_context.samplers[i];

         jit_sam->min_lod = sampler->min_lod;
         jit_sam->max_lod = sampler->max_lod;
         jit_sam->lod_bias = sampler->lod_bias;
         COPY_4V(jit_sam->border_color, sampler->border_color.f);
      }
   }

   setup->dirty |= LP_SETUP_NEW_FS;
}


/**
 * Is the given texture referenced by any scene?
 * Note: we have to check all scenes including any scenes currently
 * being rendered and the current scene being built.
 */
unsigned
lp_setup_is_resource_referenced( const struct lp_setup_context *setup,
                                 const struct pipe_resource *texture )
{
   unsigned i;

   /* check the render targets */
   for (i = 0; i < setup->fb.nr_cbufs; i++) {
      if (setup->fb.cbufs[i] && setup->fb.cbufs[i]->texture == texture)
         return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }
   if (setup->fb.zsbuf && setup->fb.zsbuf->texture == texture) {
      return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }

   /* check textures referenced by the scene */
   for (i = 0; i < Elements(setup->scenes); i++) {
      if (lp_scene_is_resource_referenced(setup->scenes[i], texture)) {
         return LP_REFERENCED_FOR_READ;
      }
   }

   return LP_UNREFERENCED;
}


/**
 * Called by vbuf code when we're about to draw something.
 *
 * This function stores all dirty state in the current scene's display list
 * memory, via lp_scene_alloc().  We can not pass pointers of mutable state to
 * the JIT functions, as the JIT functions will be called later on, most likely
 * on a different thread.
 *
 * When processing dirty state it is imperative that we don't refer to any
 * pointers previously allocated with lp_scene_alloc() in this function (or any
 * function) as they may belong to a scene freed since then.
 */
static boolean
try_update_scene_state( struct lp_setup_context *setup )
{
   static const float fake_const_buf[4];
   boolean new_scene = (setup->fs.stored == NULL);
   struct lp_scene *scene = setup->scene;
   unsigned i;

   assert(scene);

   if (setup->dirty & LP_SETUP_NEW_VIEWPORTS) {
      /*
       * Record new depth range state for changes due to viewport updates.
       *
       * TODO: Collapse the existing viewport and depth range information
       * into one structure, for access by JIT.
       */
      struct lp_jit_viewport *stored;

      stored = (struct lp_jit_viewport *)
         lp_scene_alloc(scene, sizeof setup->viewports);

      if (!stored) {
         assert(!new_scene);
         return FALSE;
      }

      memcpy(stored, setup->viewports, sizeof setup->viewports);

      setup->fs.current.jit_context.viewports = stored;
      setup->dirty |= LP_SETUP_NEW_FS;
   }

   if(setup->dirty & LP_SETUP_NEW_BLEND_COLOR) {
      uint8_t *stored;
      float* fstored;
      unsigned i, j;
      unsigned size;

      /* Alloc u8_blend_color (16 x i8) and f_blend_color (4 or 8 x f32) */
      size = 4 * 16 * sizeof(uint8_t);
      size += (LP_MAX_VECTOR_LENGTH / 4) * sizeof(float);
      stored = lp_scene_alloc_aligned(scene, size, LP_MIN_VECTOR_ALIGN);

      if (!stored) {
         assert(!new_scene);
         return FALSE;
      }

      /* Store floating point colour */
      fstored = (float*)(stored + 4*16);
      for (i = 0; i < (LP_MAX_VECTOR_LENGTH / 4); ++i) {
         fstored[i] = setup->blend_color.current.color[i % 4];
      }

      /* smear each blend color component across 16 ubyte elements */
      for (i = 0; i < 4; ++i) {
         uint8_t c = float_to_ubyte(setup->blend_color.current.color[i]);
         for (j = 0; j < 16; ++j)
            stored[i*16 + j] = c;
      }

      setup->blend_color.stored = stored;
      setup->fs.current.jit_context.u8_blend_color = stored;
      setup->fs.current.jit_context.f_blend_color = fstored;
      setup->dirty |= LP_SETUP_NEW_FS;
   }

   if (setup->dirty & LP_SETUP_NEW_CONSTANTS) {
      for (i = 0; i < Elements(setup->constants); ++i) {
         struct pipe_resource *buffer = setup->constants[i].current.buffer;
         const unsigned current_size = MIN2(setup->constants[i].current.buffer_size,
                                            LP_MAX_TGSI_CONST_BUFFER_SIZE);
         const ubyte *current_data = NULL;
         int num_constants;

         STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE);

         if (buffer) {
            /* resource buffer */
            current_data = (ubyte *) llvmpipe_resource_data(buffer);
         }
         else if (setup->constants[i].current.user_buffer) {
            /* user-space buffer */
            current_data = (ubyte *) setup->constants[i].current.user_buffer;
         }

         if (current_data) {
            current_data += setup->constants[i].current.buffer_offset;

            /* TODO: copy only the actually used constants? */

            if (setup->constants[i].stored_size != current_size ||
                !setup->constants[i].stored_data ||
                memcmp(setup->constants[i].stored_data,
                       current_data,
                       current_size) != 0) {
               void *stored;

               stored = lp_scene_alloc(scene, current_size);
               if (!stored) {
                  assert(!new_scene);
                  return FALSE;
               }

               memcpy(stored,
                      current_data,
                      current_size);
               setup->constants[i].stored_size = current_size;
               setup->constants[i].stored_data = stored;
            }
            setup->fs.current.jit_context.constants[i] =
               setup->constants[i].stored_data;
         }
         else {
            setup->constants[i].stored_size = 0;
            setup->constants[i].stored_data = NULL;
            setup->fs.current.jit_context.constants[i] = fake_const_buf;
         }

         num_constants =
            setup->constants[i].stored_size / (sizeof(float) * 4);
         setup->fs.current.jit_context.num_constants[i] = num_constants;
         setup->dirty |= LP_SETUP_NEW_FS;
      }
   }


   if (setup->dirty & LP_SETUP_NEW_FS) {
      if (!setup->fs.stored ||
          memcmp(setup->fs.stored,
                 &setup->fs.current,
                 sizeof setup->fs.current) != 0)
      {
         struct lp_rast_state *stored;

         /* The fs state that's been stored in the scene is different from
          * the new, current state.  So allocate a new lp_rast_state object
          * and append it to the bin's setup data buffer.
          */
         stored = (struct lp_rast_state *) lp_scene_alloc(scene, sizeof *stored);
         if (!stored) {
            assert(!new_scene);
            return FALSE;
         }

         memcpy(stored,
                &setup->fs.current,
                sizeof setup->fs.current);
         setup->fs.stored = stored;

         /* The scene now references the textures in the rasterization
          * state record.  Register those references with the scene now.
          */
         for (i = 0; i < Elements(setup->fs.current_tex); i++) {
            if (setup->fs.current_tex[i]) {
               if (!lp_scene_add_resource_reference(scene,
                                                    setup->fs.current_tex[i],
                                                    new_scene)) {
                  assert(!new_scene);
                  return FALSE;
               }
            }
         }
      }
   }

   if (setup->dirty & LP_SETUP_NEW_SCISSOR) {
      unsigned i;
      for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
         setup->draw_regions[i] = setup->framebuffer;
         if (setup->scissor_test) {
            u_rect_possible_intersection(&setup->scissors[i],
                                         &setup->draw_regions[i]);
         }
      }
   }

   setup->dirty = 0;

   assert(setup->fs.stored);
   return TRUE;
}

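/**
 * Validate state before drawing.  Processes any pending llvmpipe state
 * changes and, if update_scene is TRUE, also makes sure there is an active
 * scene whose copy of the setup state is current, flushing and restarting
 * the scene if storing that state into it fails.
 */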
boolean
lp_setup_update_state( struct lp_setup_context *setup,
                       boolean update_scene )
{
   /* Some of the 'draw' pipeline stages may have changed some driver state.
    * Make sure we've processed those state changes before anything else.
    *
    * XXX this is the only place where llvmpipe_context is used in the
    * setup code.  This may get refactored/changed...
    */
   {
      struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
      if (lp->dirty) {
         llvmpipe_update_derived(lp);
      }

      if (lp->setup->dirty) {
         llvmpipe_update_setup(lp);
      }

      assert(setup->setup.variant);

      /* Will probably need to move this somewhere else, just need
       * to know about vertex shader point size attribute.
       */
      setup->psize = lp->psize_slot;
      setup->viewport_index_slot = lp->viewport_index_slot;
      setup->layer_slot = lp->layer_slot;
      setup->face_slot = lp->face_slot;

      assert(lp->dirty == 0);

      assert(lp->setup_variant.key.size ==
             setup->setup.variant->key.size);

      assert(memcmp(&lp->setup_variant.key,
                    &setup->setup.variant->key,
                    setup->setup.variant->key.size) == 0);
   }

   if (update_scene && setup->state != SETUP_ACTIVE) {
      if (!set_scene_state( setup, SETUP_ACTIVE, __FUNCTION__ ))
         return FALSE;
   }

   /* Only call into try_update_scene_state() if we already have a
    * scene:
    */
   if (update_scene && setup->scene) {
      assert(setup->state == SETUP_ACTIVE);

      if (try_update_scene_state(setup))
         return TRUE;

      /* Update failed, try to restart the scene.
       *
       * Cannot call lp_setup_flush_and_restart() directly here
       * because of potential recursion.
       */
      if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
         return FALSE;

      if (!set_scene_state(setup, SETUP_ACTIVE, __FUNCTION__))
         return FALSE;

      if (!setup->scene)
         return FALSE;

      return try_update_scene_state(setup);
   }

   return TRUE;
}



/* Only caller is lp_setup_vbuf_destroy()
 */
void
lp_setup_destroy( struct lp_setup_context *setup )
{
   uint i;

   lp_setup_reset( setup );

   util_unreference_framebuffer_state(&setup->fb);

   for (i = 0; i < Elements(setup->fs.current_tex); i++) {
      pipe_resource_reference(&setup->fs.current_tex[i], NULL);
   }

   for (i = 0; i < Elements(setup->constants); i++) {
      pipe_resource_reference(&setup->constants[i].current.buffer, NULL);
   }

   /* free the scenes in the 'empty' queue */
   for (i = 0; i < Elements(setup->scenes); i++) {
      struct lp_scene *scene = setup->scenes[i];

      if (scene->fence)
         lp_fence_wait(scene->fence);

      lp_scene_destroy(scene);
   }

   lp_fence_reference(&setup->last_fence, NULL);

   FREE( setup );
}


/**
 * Create a new primitive tiling engine.  Plug it into the backend of
 * the draw module.  Currently also creates a rasterizer to use with
 * it.
 */
struct lp_setup_context *
lp_setup_create( struct pipe_context *pipe,
                 struct draw_context *draw )
{
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_setup_context *setup;
   unsigned i;

   setup = CALLOC_STRUCT(lp_setup_context);
   if (!setup) {
      goto no_setup;
   }

   lp_setup_init_vbuf(setup);

   /* Used only in update_state():
    */
   setup->pipe = pipe;


   setup->num_threads = screen->num_threads;
   setup->vbuf = draw_vbuf_stage(draw, &setup->base);
   if (!setup->vbuf) {
      goto no_vbuf;
   }

   draw_set_rasterize_stage(draw, setup->vbuf);
   draw_set_render(draw, &setup->base);

   /* create some empty scenes */
   for (i = 0; i < MAX_SCENES; i++) {
      setup->scenes[i] = lp_scene_create( pipe );
      if (!setup->scenes[i]) {
         goto no_scenes;
      }
   }

   setup->triangle = first_triangle;
   setup->line = first_line;
   setup->point = first_point;

   setup->dirty = ~0;

   return setup;

no_scenes:
   for (i = 0; i < MAX_SCENES; i++) {
      if (setup->scenes[i]) {
         lp_scene_destroy(setup->scenes[i]);
      }
   }

   setup->vbuf->destroy(setup->vbuf);
no_vbuf:
   FREE(setup);
no_setup:
   return NULL;
}


/**
 * Put a BeginQuery command into all bins.
 */
void
lp_setup_begin_query(struct lp_setup_context *setup,
                     struct llvmpipe_query *pq)
{

   set_scene_state(setup, SETUP_ACTIVE, "begin_query");

   if (!(pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
         pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
         pq->type == PIPE_QUERY_PIPELINE_STATISTICS))
      return;

   /* init the query to its beginning state */
   assert(setup->active_binned_queries < LP_MAX_ACTIVE_BINNED_QUERIES);
   /* if the active list is already full, just ignore the query */
   if (setup->active_binned_queries >= LP_MAX_ACTIVE_BINNED_QUERIES) {
      return;
   }
   assert(setup->active_queries[setup->active_binned_queries] == NULL);
   setup->active_queries[setup->active_binned_queries] = pq;
   setup->active_binned_queries++;

   assert(setup->scene);
   if (setup->scene) {
      if (!lp_scene_bin_everywhere(setup->scene,
                                   LP_RAST_OP_BEGIN_QUERY,
                                   lp_rast_arg_query(pq))) {

         if (!lp_setup_flush_and_restart(setup))
            return;

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_BEGIN_QUERY,
                                      lp_rast_arg_query(pq))) {
            return;
         }
      }
      setup->scene->had_queries |= TRUE;
   }
}


/**
 * Put an EndQuery command into all bins.
 */
void
lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq)
{
   set_scene_state(setup, SETUP_ACTIVE, "end_query");

   assert(setup->scene);
   if (setup->scene) {
      /* pq->fence should be the fence of the *last* scene which
       * contributed to the query result.
       */
      lp_fence_reference(&pq->fence, setup->scene->fence);

      if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
          pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
          pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
          pq->type == PIPE_QUERY_TIMESTAMP) {
         if (pq->type == PIPE_QUERY_TIMESTAMP &&
             !(setup->scene->tiles_x | setup->scene->tiles_y)) {
            /*
             * If the framebuffer has zero width or height, there are no bins
             * and hence no rast task is ever run, so fill in a timestamp
             * here instead.
             */
            pq->end[0] = os_time_get_nano();
         }

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_END_QUERY,
                                      lp_rast_arg_query(pq))) {
            if (!lp_setup_flush_and_restart(setup))
               goto fail;

            if (!lp_scene_bin_everywhere(setup->scene,
                                         LP_RAST_OP_END_QUERY,
                                         lp_rast_arg_query(pq))) {
               goto fail;
            }
         }
         setup->scene->had_queries |= TRUE;
      }
   }
   else {
      lp_fence_reference(&pq->fence, setup->last_fence);
   }

fail:
   /* Need to do this now rather than earlier: the query must still be on the
    * active list while the end-query command is binned above, in case binning
    * it causes a flush.
    */
   if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
       pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       pq->type == PIPE_QUERY_PIPELINE_STATISTICS) {
      unsigned i;

      /* remove from active binned query list */
      for (i = 0; i < setup->active_binned_queries; i++) {
         if (setup->active_queries[i] == pq)
            break;
      }
      assert(i < setup->active_binned_queries);
      if (i == setup->active_binned_queries)
         return;
      setup->active_binned_queries--;
      setup->active_queries[i] = setup->active_queries[setup->active_binned_queries];
      setup->active_queries[setup->active_binned_queries] = NULL;
   }
}


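/**
 * Flush the current scene and immediately start binning a new one, so that
 * binning can continue after a bin command fails (typically because the
 * scene has filled its data store).
 */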
boolean
lp_setup_flush_and_restart(struct lp_setup_context *setup)
{
   if (0) debug_printf("%s\n", __FUNCTION__);

   assert(setup->state == SETUP_ACTIVE);

   if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
      return FALSE;

   if (!lp_setup_update_state(setup, TRUE))
      return FALSE;

   return TRUE;
}
