llvmpipe: don't store eo as 64bit int
[mesa.git] / src / gallium / drivers / llvmpipe / lp_setup.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * Tiling engine.
30 *
31 * Builds per-tile display lists and executes them on calls to
32 * lp_setup_flush().
33 */
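/*
 * Rough lifecycle (an illustrative sketch of how the entry points below are
 * typically used, not a literal call trace):
 *
 *    lp_setup_bind_framebuffer(setup, fb);
 *    lp_setup_clear(setup, color, depth, stencil, flags);  // queued or binned
 *    ... primitives are binned into per-tile command lists ...
 *    lp_setup_flush(setup, &fence, reason);                // rasterize scene
 */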
34
35 #include <limits.h>
36
37 #include "pipe/p_defines.h"
38 #include "util/u_framebuffer.h"
39 #include "util/u_inlines.h"
40 #include "util/u_memory.h"
41 #include "util/u_pack_color.h"
42 #include "draw/draw_pipe.h"
43 #include "os/os_time.h"
44 #include "lp_context.h"
45 #include "lp_memory.h"
46 #include "lp_scene.h"
47 #include "lp_texture.h"
48 #include "lp_debug.h"
49 #include "lp_fence.h"
50 #include "lp_query.h"
51 #include "lp_rast.h"
52 #include "lp_setup_context.h"
53 #include "lp_screen.h"
54 #include "lp_state.h"
55 #include "state_tracker/sw_winsys.h"
56
57 #include "draw/draw_context.h"
58 #include "draw/draw_vbuf.h"
59
60
61 static boolean set_scene_state( struct lp_setup_context *, enum setup_state,
62 const char *reason);
63 static boolean try_update_scene_state( struct lp_setup_context *setup );
64
65
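/**
 * Advance to the next scene in the ring of scenes, waiting for any previous
 * rasterization of that scene to finish, then begin binning into it.
 */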
66 static void
67 lp_setup_get_empty_scene(struct lp_setup_context *setup)
68 {
69 assert(setup->scene == NULL);
70
71 setup->scene_idx++;
72 setup->scene_idx %= Elements(setup->scenes);
73
74 setup->scene = setup->scenes[setup->scene_idx];
75
76 if (setup->scene->fence) {
77 if (LP_DEBUG & DEBUG_SETUP)
78 debug_printf("%s: wait for scene %d\n",
79 __FUNCTION__, setup->scene->fence->id);
80
81 lp_fence_wait(setup->scene->fence);
82 }
83
84 lp_scene_begin_binning(setup->scene, &setup->fb, setup->rasterizer_discard);
85
86 }
87
88
89 static void
90 first_triangle( struct lp_setup_context *setup,
91 const float (*v0)[4],
92 const float (*v1)[4],
93 const float (*v2)[4])
94 {
95 assert(setup->state == SETUP_ACTIVE);
96 lp_setup_choose_triangle( setup );
97 setup->triangle( setup, v0, v1, v2 );
98 }
99
100 static void
101 first_line( struct lp_setup_context *setup,
102 const float (*v0)[4],
103 const float (*v1)[4])
104 {
105 assert(setup->state == SETUP_ACTIVE);
106 lp_setup_choose_line( setup );
107 setup->line( setup, v0, v1 );
108 }
109
110 static void
111 first_point( struct lp_setup_context *setup,
112 const float (*v0)[4])
113 {
114 assert(setup->state == SETUP_ACTIVE);
115 lp_setup_choose_point( setup );
116 setup->point( setup, v0 );
117 }
118
119 void lp_setup_reset( struct lp_setup_context *setup )
120 {
121 unsigned i;
122
123 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
124
125 /* Reset derived state */
126 for (i = 0; i < Elements(setup->constants); ++i) {
127 setup->constants[i].stored_size = 0;
128 setup->constants[i].stored_data = NULL;
129 }
130 setup->fs.stored = NULL;
131 setup->dirty = ~0;
132
133 /* no current bin */
134 setup->scene = NULL;
135
136 /* Reset some state:
137 */
138 memset(&setup->clear, 0, sizeof setup->clear);
139
140 /* Have an explicit "start-binning" call and get rid of this
141 * pointer twiddling?
142 */
143 setup->line = first_line;
144 setup->point = first_point;
145 setup->triangle = first_triangle;
146 }
147
148
149 /** Rasterize all scene's bins */
150 static void
151 lp_setup_rasterize_scene( struct lp_setup_context *setup )
152 {
153 struct lp_scene *scene = setup->scene;
154 struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);
155
156 scene->num_active_queries = setup->active_binned_queries;
157 memcpy(scene->active_queries, setup->active_queries,
158 scene->num_active_queries * sizeof(scene->active_queries[0]));
159
160 lp_scene_end_binning(scene);
161
162 lp_fence_reference(&setup->last_fence, scene->fence);
163
164 if (setup->last_fence)
165 setup->last_fence->issued = TRUE;
166
167 pipe_mutex_lock(screen->rast_mutex);
168
169 /* FIXME: We enqueue the scene and then wait for the rasterizer to finish.
170 * This means we never actually run any vertex work in parallel with
171 * rasterization (not in the same context at least), which is what having
172 * multiple scenes per setup is about - by the time we get a new empty
173 * scene, any old one is already empty again because we waited here for
174 * the raster tasks to finish. Ideally, we shouldn't need to wait here
175 * and should rely on fences elsewhere when waiting is necessary.
176 * Certainly, lp_scene_end_rasterization() would need to be deferred too,
177 * and there are probably other reasons why this doesn't actually work yet.
178 */
179 lp_rast_queue_scene(screen->rast, scene);
180 lp_rast_finish(screen->rast);
181 pipe_mutex_unlock(screen->rast_mutex);
182
183 lp_scene_end_rasterization(setup->scene);
184 lp_setup_reset( setup );
185
186 LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
187 }
188
189
190
191 static boolean
192 begin_binning( struct lp_setup_context *setup )
193 {
194 struct lp_scene *scene = setup->scene;
195 boolean need_zsload = FALSE;
196 boolean ok;
197
198 assert(scene);
199 assert(scene->fence == NULL);
200
201 /* Always create a fence:
202 */
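   /* The fence's rank is the number of rasterizer threads (at least one),
    * so it only completes once every thread has signalled it.
    */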
203 scene->fence = lp_fence_create(MAX2(1, setup->num_threads));
204 if (!scene->fence)
205 return FALSE;
206
207 ok = try_update_scene_state(setup);
208 if (!ok)
209 return FALSE;
210
211 if (setup->fb.zsbuf &&
212 ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
213 util_format_is_depth_and_stencil(setup->fb.zsbuf->format))
214 need_zsload = TRUE;
215
216 LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__,
217 setup->clear.flags >> 2,
218 need_zsload ? "clear": "load");
219
220 if (setup->clear.flags & PIPE_CLEAR_COLOR) {
221 unsigned cbuf;
222 for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) {
223 assert(PIPE_CLEAR_COLOR0 == 1 << 2);
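         /* PIPE_CLEAR_COLOR0 is bit 2, so color buffer N maps to flag bit
          * (2 + N); e.g. clearing only cbuf 1 sets 1 << 3 = 0x8.
          */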
224 if (setup->clear.flags & (1 << (2 + cbuf))) {
225 union lp_rast_cmd_arg clearrb_arg;
226 struct lp_rast_clear_rb *cc_scene =
227 (struct lp_rast_clear_rb *)
228 lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb));
229
230 if (!cc_scene) {
231 return FALSE;
232 }
233
234 cc_scene->cbuf = cbuf;
235 cc_scene->color_val = setup->clear.color_val[cbuf];
236 clearrb_arg.clear_rb = cc_scene;
237
238 if (!lp_scene_bin_everywhere(scene,
239 LP_RAST_OP_CLEAR_COLOR,
240 clearrb_arg))
241 return FALSE;
242 }
243 }
244 }
245
246 if (setup->fb.zsbuf) {
247 if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) {
248 ok = lp_scene_bin_everywhere( scene,
249 LP_RAST_OP_CLEAR_ZSTENCIL,
250 lp_rast_arg_clearzs(
251 setup->clear.zsvalue,
252 setup->clear.zsmask));
253 if (!ok)
254 return FALSE;
255 }
256 }
257
258 setup->clear.flags = 0;
259 setup->clear.zsmask = 0;
260 setup->clear.zsvalue = 0;
261
262 scene->had_queries = !!setup->active_binned_queries;
263
264 LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
265 return TRUE;
266 }
267
268
269 /* This basically bins and then flushes any outstanding full-screen
270 * clears.
271 *
272 * TODO: fast path for fullscreen clears and no triangles.
273 */
274 static boolean
275 execute_clears( struct lp_setup_context *setup )
276 {
277 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
278
279 return begin_binning( setup );
280 }
281
282 const char *states[] = {
283 "FLUSHED",
284 "CLEARED",
285 "ACTIVE "
286 };
287
288
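/*
 * Transition the setup/scene state machine. As a rough summary (the switch
 * below is authoritative): FLUSHED -> CLEARED just accumulates clears before
 * any rendering; FLUSHED/CLEARED -> ACTIVE grabs an empty scene and bins the
 * queued clears; -> FLUSHED rasterizes the current scene and resets setup.
 */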
289 static boolean
290 set_scene_state( struct lp_setup_context *setup,
291 enum setup_state new_state,
292 const char *reason)
293 {
294 unsigned old_state = setup->state;
295
296 if (old_state == new_state)
297 return TRUE;
298
299 if (LP_DEBUG & DEBUG_SCENE) {
300 debug_printf("%s old %s new %s%s%s\n",
301 __FUNCTION__,
302 states[old_state],
303 states[new_state],
304 (new_state == SETUP_FLUSHED) ? ": " : "",
305 (new_state == SETUP_FLUSHED) ? reason : "");
306
307 if (new_state == SETUP_FLUSHED && setup->scene)
308 lp_debug_draw_bins_by_cmd_length(setup->scene);
309 }
310
311 /* wait for a free/empty scene
312 */
313 if (old_state == SETUP_FLUSHED)
314 lp_setup_get_empty_scene(setup);
315
316 switch (new_state) {
317 case SETUP_CLEARED:
318 break;
319
320 case SETUP_ACTIVE:
321 if (!begin_binning( setup ))
322 goto fail;
323 break;
324
325 case SETUP_FLUSHED:
326 if (old_state == SETUP_CLEARED)
327 if (!execute_clears( setup ))
328 goto fail;
329
330 lp_setup_rasterize_scene( setup );
331 assert(setup->scene == NULL);
332 break;
333
334 default:
335 assert(0 && "invalid setup state mode");
336 goto fail;
337 }
338
339 setup->state = new_state;
340 return TRUE;
341
342 fail:
343 if (setup->scene) {
344 lp_scene_end_rasterization(setup->scene);
345 setup->scene = NULL;
346 }
347
348 setup->state = SETUP_FLUSHED;
349 lp_setup_reset( setup );
350 return FALSE;
351 }
352
353
354 void
355 lp_setup_flush( struct lp_setup_context *setup,
356 struct pipe_fence_handle **fence,
357 const char *reason)
358 {
359 set_scene_state( setup, SETUP_FLUSHED, reason );
360
361 if (fence) {
362 lp_fence_reference((struct lp_fence **)fence, setup->last_fence);
363 }
364 }
365
366
367 void
368 lp_setup_bind_framebuffer( struct lp_setup_context *setup,
369 const struct pipe_framebuffer_state *fb )
370 {
371 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
372
373 /* Flush any old scene.
374 */
375 set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );
376
377 /*
378 * Ensure the old scene is not reused.
379 */
380 assert(!setup->scene);
381
382 /* Set new state. This will be picked up later when we next need a
383 * scene.
384 */
385 util_copy_framebuffer_state(&setup->fb, fb);
386 setup->framebuffer.x0 = 0;
387 setup->framebuffer.y0 = 0;
388 setup->framebuffer.x1 = fb->width-1;
389 setup->framebuffer.y1 = fb->height-1;
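   /* Note the framebuffer rect (like the scissor rects set elsewhere) is
    * stored with inclusive x1/y1 bounds, hence the -1.
    */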
390 setup->dirty |= LP_SETUP_NEW_SCISSOR;
391 }
392
393
394 /*
395 * Try to clear one color buffer of the attached fb, either by binning a clear
396 * command or queuing up the clear for later (when binning is started).
397 */
398 static boolean
399 lp_setup_try_clear_color_buffer(struct lp_setup_context *setup,
400 const union pipe_color_union *color,
401 unsigned cbuf)
402 {
403 union lp_rast_cmd_arg clearrb_arg;
404 union util_color uc;
405 enum pipe_format format = setup->fb.cbufs[cbuf]->format;
406
407 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);
408
409 if (util_format_is_pure_integer(format)) {
410 /*
411 * We expect int/uint clear values here, though some APIs
412 * might disagree (but in any case util_pack_color()
413 * couldn't handle it)...
414 */
415 if (util_format_is_pure_sint(format)) {
416 util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1);
417 }
418 else {
419 assert(util_format_is_pure_uint(format));
420 util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
421 }
422 }
423 else {
424 util_pack_color(color->f, format, &uc);
425 }
426
427 if (setup->state == SETUP_ACTIVE) {
428 struct lp_scene *scene = setup->scene;
429
430 /* Add the clear to existing scene. In the unusual case where
431 * both color and depth-stencil are being cleared when there's
432 * already been some rendering, we could discard the currently
433 * binned scene and start again, but I don't see that as being
434 * a common usage.
435 */
436 struct lp_rast_clear_rb *cc_scene =
437 (struct lp_rast_clear_rb *)
438 lp_scene_alloc_aligned(scene, sizeof(struct lp_rast_clear_rb), 8);
439
440 if (!cc_scene) {
441 return FALSE;
442 }
443
444 cc_scene->cbuf = cbuf;
445 cc_scene->color_val = uc;
446 clearrb_arg.clear_rb = cc_scene;
447
448 if (!lp_scene_bin_everywhere(scene,
449 LP_RAST_OP_CLEAR_COLOR,
450 clearrb_arg))
451 return FALSE;
452 }
453 else {
454 /* Put ourselves into the 'pre-clear' state, specifically to try
455 * and accumulate multiple clears to color and depth_stencil
456 * buffers which the app or state-tracker might issue
457 * separately.
458 */
459 set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );
460
461 assert(PIPE_CLEAR_COLOR0 == (1 << 2));
462 setup->clear.flags |= 1 << (cbuf + 2);
463 setup->clear.color_val[cbuf] = uc;
464 }
465
466 return TRUE;
467 }
468
469 static boolean
470 lp_setup_try_clear_zs(struct lp_setup_context *setup,
471 double depth,
472 unsigned stencil,
473 unsigned flags)
474 {
475 uint64_t zsmask = 0;
476 uint64_t zsvalue = 0;
477 uint32_t zmask32;
478 uint8_t smask8;
479
480 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);
481
482 zmask32 = (flags & PIPE_CLEAR_DEPTH) ? ~0 : 0;
483 smask8 = (flags & PIPE_CLEAR_STENCIL) ? ~0 : 0;
484
485 zsvalue = util_pack64_z_stencil(setup->fb.zsbuf->format,
486 depth,
487 stencil);
488
489 /*
490 * XXX: should make a full mask here for things like D24X8,
491 * otherwise we'll do a read-modify-write clear later which
492 * should be unnecessary.
493 */
494 zsmask = util_pack64_mask_z_stencil(setup->fb.zsbuf->format,
495 zmask32,
496 smask8);
497
498 zsvalue &= zsmask;
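   /* E.g. clearing only depth of a combined depth/stencil format leaves the
    * stencil bits out of zsmask, so the binned clear must preserve the
    * existing stencil values (the read-modify-write mentioned above).
    */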
499
500 if (setup->state == SETUP_ACTIVE) {
501 struct lp_scene *scene = setup->scene;
502
503 /* Add the clear to existing scene. In the unusual case where
504 * both color and depth-stencil are being cleared when there's
505 * already been some rendering, we could discard the currently
506 * binned scene and start again, but I don't see that as being
507 * a common usage.
508 */
509 if (!lp_scene_bin_everywhere(scene,
510 LP_RAST_OP_CLEAR_ZSTENCIL,
511 lp_rast_arg_clearzs(zsvalue, zsmask)))
512 return FALSE;
513 }
514 else {
515 /* Put ourselves into the 'pre-clear' state, specifically to try
516 * and accumulate multiple clears to color and depth_stencil
517 * buffers which the app or state-tracker might issue
518 * separately.
519 */
520 set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );
521
522 setup->clear.flags |= flags;
523
524 setup->clear.zsmask |= zsmask;
525 setup->clear.zsvalue =
526 (setup->clear.zsvalue & ~zsmask) | (zsvalue & zsmask);
527 }
528
529 return TRUE;
530 }
531
532 void
533 lp_setup_clear( struct lp_setup_context *setup,
534 const union pipe_color_union *color,
535 double depth,
536 unsigned stencil,
537 unsigned flags )
538 {
539 unsigned i;
540
541 /*
542 * Note that any of these (at most 9) clears could fail (though at most one
543 * of them should fail!). Trying each clear separately and retrying after a
544 * flush avoids redoing the clears that already succeeded (we still clear
545 * tiles twice if a clear command succeeded only partially for one buffer).
546 */
547 if (flags & PIPE_CLEAR_DEPTHSTENCIL) {
548 unsigned flagszs = flags & PIPE_CLEAR_DEPTHSTENCIL;
549 if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) {
550 lp_setup_flush(setup, NULL, __FUNCTION__);
551
552 if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs))
553 assert(0);
554 }
555 }
556
557 if (flags & PIPE_CLEAR_COLOR) {
558 assert(PIPE_CLEAR_COLOR0 == (1 << 2));
559 for (i = 0; i < setup->fb.nr_cbufs; i++) {
560 if ((flags & (1 << (2 + i))) && setup->fb.cbufs[i]) {
561 if (!lp_setup_try_clear_color_buffer(setup, color, i)) {
562 lp_setup_flush(setup, NULL, __FUNCTION__);
563
564 if (!lp_setup_try_clear_color_buffer(setup, color, i))
565 assert(0);
566 }
567 }
568 }
569 }
570 }
571
572
573
574 void
575 lp_setup_set_triangle_state( struct lp_setup_context *setup,
576 unsigned cull_mode,
577 boolean ccw_is_frontface,
578 boolean scissor,
579 boolean half_pixel_center,
580 boolean bottom_edge_rule)
581 {
582 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
583
584 setup->ccw_is_frontface = ccw_is_frontface;
585 setup->cullmode = cull_mode;
586 setup->triangle = first_triangle;
587 setup->pixel_offset = half_pixel_center ? 0.5f : 0.0f;
588 setup->bottom_edge_rule = bottom_edge_rule;
589
590 if (setup->scissor_test != scissor) {
591 setup->dirty |= LP_SETUP_NEW_SCISSOR;
592 setup->scissor_test = scissor;
593 }
594 }
595
596 void
597 lp_setup_set_line_state( struct lp_setup_context *setup,
598 float line_width)
599 {
600 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
601
602 setup->line_width = line_width;
603 }
604
605 void
606 lp_setup_set_point_state( struct lp_setup_context *setup,
607 float point_size,
608 boolean point_size_per_vertex,
609 uint sprite_coord_enable,
610 uint sprite_coord_origin)
611 {
612 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
613
614 setup->point_size = point_size;
615 setup->sprite_coord_enable = sprite_coord_enable;
616 setup->sprite_coord_origin = sprite_coord_origin;
617 setup->point_size_per_vertex = point_size_per_vertex;
618 }
619
620 void
621 lp_setup_set_setup_variant( struct lp_setup_context *setup,
622 const struct lp_setup_variant *variant)
623 {
624 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
625
626 setup->setup.variant = variant;
627 }
628
629 void
630 lp_setup_set_fs_variant( struct lp_setup_context *setup,
631 struct lp_fragment_shader_variant *variant)
632 {
633 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__,
634 variant);
635 /* FIXME: reference count */
636
637 setup->fs.current.variant = variant;
638 setup->dirty |= LP_SETUP_NEW_FS;
639 }
640
641 void
642 lp_setup_set_fs_constants(struct lp_setup_context *setup,
643 unsigned num,
644 struct pipe_constant_buffer *buffers)
645 {
646 unsigned i;
647
648 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);
649
650 assert(num <= Elements(setup->constants));
651
652 for (i = 0; i < num; ++i) {
653 util_copy_constant_buffer(&setup->constants[i].current, &buffers[i]);
654 }
655 for (; i < Elements(setup->constants); i++) {
656 util_copy_constant_buffer(&setup->constants[i].current, NULL);
657 }
658 setup->dirty |= LP_SETUP_NEW_CONSTANTS;
659 }
660
661
662 void
663 lp_setup_set_alpha_ref_value( struct lp_setup_context *setup,
664 float alpha_ref_value )
665 {
666 LP_DBG(DEBUG_SETUP, "%s %f\n", __FUNCTION__, alpha_ref_value);
667
668 if(setup->fs.current.jit_context.alpha_ref_value != alpha_ref_value) {
669 setup->fs.current.jit_context.alpha_ref_value = alpha_ref_value;
670 setup->dirty |= LP_SETUP_NEW_FS;
671 }
672 }
673
674 void
675 lp_setup_set_stencil_ref_values( struct lp_setup_context *setup,
676 const ubyte refs[2] )
677 {
678 LP_DBG(DEBUG_SETUP, "%s %d %d\n", __FUNCTION__, refs[0], refs[1]);
679
680 if (setup->fs.current.jit_context.stencil_ref_front != refs[0] ||
681 setup->fs.current.jit_context.stencil_ref_back != refs[1]) {
682 setup->fs.current.jit_context.stencil_ref_front = refs[0];
683 setup->fs.current.jit_context.stencil_ref_back = refs[1];
684 setup->dirty |= LP_SETUP_NEW_FS;
685 }
686 }
687
688 void
689 lp_setup_set_blend_color( struct lp_setup_context *setup,
690 const struct pipe_blend_color *blend_color )
691 {
692 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
693
694 assert(blend_color);
695
696 if(memcmp(&setup->blend_color.current, blend_color, sizeof *blend_color) != 0) {
697 memcpy(&setup->blend_color.current, blend_color, sizeof *blend_color);
698 setup->dirty |= LP_SETUP_NEW_BLEND_COLOR;
699 }
700 }
701
702
703 void
704 lp_setup_set_scissors( struct lp_setup_context *setup,
705 const struct pipe_scissor_state *scissors )
706 {
707 unsigned i;
708 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
709
710 assert(scissors);
711
712 for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
713 setup->scissors[i].x0 = scissors[i].minx;
714 setup->scissors[i].x1 = scissors[i].maxx-1;
715 setup->scissors[i].y0 = scissors[i].miny;
716 setup->scissors[i].y1 = scissors[i].maxy-1;
717 }
718 setup->dirty |= LP_SETUP_NEW_SCISSOR;
719 }
720
721
722 void
723 lp_setup_set_flatshade_first( struct lp_setup_context *setup,
724 boolean flatshade_first )
725 {
726 setup->flatshade_first = flatshade_first;
727 }
728
729 void
730 lp_setup_set_rasterizer_discard( struct lp_setup_context *setup,
731 boolean rasterizer_discard )
732 {
733 if (setup->rasterizer_discard != rasterizer_discard) {
734 setup->rasterizer_discard = rasterizer_discard;
735 set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );
736 }
737 }
738
739 void
740 lp_setup_set_vertex_info( struct lp_setup_context *setup,
741 struct vertex_info *vertex_info )
742 {
743 /* XXX: just silently holding onto the pointer:
744 */
745 setup->vertex_info = vertex_info;
746 }
747
748
749 /**
750 * Called during state validation when LP_NEW_VIEWPORT is set.
751 */
752 void
753 lp_setup_set_viewports(struct lp_setup_context *setup,
754 unsigned num_viewports,
755 const struct pipe_viewport_state *viewports)
756 {
757 struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
758 unsigned i;
759
760 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
761
762 assert(num_viewports <= PIPE_MAX_VIEWPORTS);
763 assert(viewports);
764
765 /*
766 * For use in lp_state_fs.c, propagate the viewport values for all viewports.
767 */
768 for (i = 0; i < num_viewports; i++) {
769 float min_depth;
770 float max_depth;
771
772 if (lp->rasterizer->clip_halfz == 0) {
773 float half_depth = viewports[i].scale[2];
774 min_depth = viewports[i].translate[2] - half_depth;
775 max_depth = min_depth + half_depth * 2.0f;
776 } else {
777 min_depth = viewports[i].translate[2];
778 max_depth = min_depth + viewports[i].scale[2];
779 }
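      /* For instance (assuming the usual GL conventions), a viewport mapping
       * [-1,1] clip-space z onto [0,1] has scale[2] = 0.5 and
       * translate[2] = 0.5, giving min_depth = 0.0 and max_depth = 1.0 here.
       */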
780
781 if (setup->viewports[i].min_depth != min_depth ||
782 setup->viewports[i].max_depth != max_depth) {
783 setup->viewports[i].min_depth = min_depth;
784 setup->viewports[i].max_depth = max_depth;
785 setup->dirty |= LP_SETUP_NEW_VIEWPORTS;
786 }
787 }
788 }
789
790
791 /**
792 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
793 */
794 void
795 lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
796 unsigned num,
797 struct pipe_sampler_view **views)
798 {
799 unsigned i;
800
801 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
802
803 assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
804
805 for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
806 struct pipe_sampler_view *view = i < num ? views[i] : NULL;
807
808 if (view) {
809 struct pipe_resource *res = view->texture;
810 struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
811 struct lp_jit_texture *jit_tex;
812 jit_tex = &setup->fs.current.jit_context.textures[i];
813
814 /* We're referencing the texture's internal data, so save a
815 * reference to it.
816 */
817 pipe_resource_reference(&setup->fs.current_tex[i], res);
818
819 if (!lp_tex->dt) {
820 /* regular texture - setup array of mipmap level offsets */
821 int j;
822 unsigned first_level = 0;
823 unsigned last_level = 0;
824
825 if (llvmpipe_resource_is_texture(res)) {
826 first_level = view->u.tex.first_level;
827 last_level = view->u.tex.last_level;
828 assert(first_level <= last_level);
829 assert(last_level <= res->last_level);
830 jit_tex->base = lp_tex->tex_data;
831 }
832 else {
833 jit_tex->base = lp_tex->data;
834 }
835
836 if (LP_PERF & PERF_TEX_MEM) {
837 /* use dummy tile memory */
838 jit_tex->base = lp_dummy_tile;
839 jit_tex->width = TILE_SIZE/8;
840 jit_tex->height = TILE_SIZE/8;
841 jit_tex->depth = 1;
842 jit_tex->first_level = 0;
843 jit_tex->last_level = 0;
844 jit_tex->mip_offsets[0] = 0;
845 jit_tex->row_stride[0] = 0;
846 jit_tex->img_stride[0] = 0;
847 }
848 else {
849 jit_tex->width = res->width0;
850 jit_tex->height = res->height0;
851 jit_tex->depth = res->depth0;
852 jit_tex->first_level = first_level;
853 jit_tex->last_level = last_level;
854
855 if (llvmpipe_resource_is_texture(res)) {
856 for (j = first_level; j <= last_level; j++) {
857 jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
858 jit_tex->row_stride[j] = lp_tex->row_stride[j];
859 jit_tex->img_stride[j] = lp_tex->img_stride[j];
860 }
861
862 if (res->target == PIPE_TEXTURE_1D_ARRAY ||
863 res->target == PIPE_TEXTURE_2D_ARRAY ||
864 res->target == PIPE_TEXTURE_CUBE ||
865 res->target == PIPE_TEXTURE_CUBE_ARRAY) {
866 /*
867 * For array textures, we don't have a first_layer field; instead
868 * adjust last_layer (stored as depth) plus the mip level offsets
869 * (since we have a mip-first layout we can't just adjust the base ptr).
870 * XXX For mip levels, could do something similar.
871 */
872 jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
873 for (j = first_level; j <= last_level; j++) {
874 jit_tex->mip_offsets[j] += view->u.tex.first_layer *
875 lp_tex->img_stride[j];
876 }
877 if (view->target == PIPE_TEXTURE_CUBE ||
878 view->target == PIPE_TEXTURE_CUBE_ARRAY) {
879 assert(jit_tex->depth % 6 == 0);
880 }
881 assert(view->u.tex.first_layer <= view->u.tex.last_layer);
882 assert(view->u.tex.last_layer < res->array_size);
883 }
884 }
885 else {
886 /*
887 * For buffers, we don't have first_element, instead adjust
888 * last_element (stored as width) plus the base pointer.
889 */
890 unsigned view_blocksize = util_format_get_blocksize(view->format);
891 /* probably don't really need to fill that out */
892 jit_tex->mip_offsets[0] = 0;
893 jit_tex->row_stride[0] = 0;
894 jit_tex->img_stride[0] = 0;
895
896 /* everything specified in number of elements here. */
897 jit_tex->width = view->u.buf.last_element - view->u.buf.first_element + 1;
898 jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.first_element *
899 view_blocksize;
900 /* XXX Unsure if we need to sanitize parameters? */
901 assert(view->u.buf.first_element <= view->u.buf.last_element);
902 assert(view->u.buf.last_element * view_blocksize < res->width0);
903 }
904 }
905 }
906 else {
907 /* display target texture/surface */
908 /*
909 * XXX: Where should this be unmapped?
910 */
911 struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
912 struct sw_winsys *winsys = screen->winsys;
913 jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
914 PIPE_TRANSFER_READ);
915 jit_tex->row_stride[0] = lp_tex->row_stride[0];
916 jit_tex->img_stride[0] = lp_tex->img_stride[0];
917 jit_tex->mip_offsets[0] = 0;
918 jit_tex->width = res->width0;
919 jit_tex->height = res->height0;
920 jit_tex->depth = res->depth0;
921 jit_tex->first_level = jit_tex->last_level = 0;
922 assert(jit_tex->base);
923 }
924 }
925 }
926
927 setup->dirty |= LP_SETUP_NEW_FS;
928 }
929
930
931 /**
932 * Called during state validation when LP_NEW_SAMPLER is set.
933 */
934 void
935 lp_setup_set_fragment_sampler_state(struct lp_setup_context *setup,
936 unsigned num,
937 struct pipe_sampler_state **samplers)
938 {
939 unsigned i;
940
941 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
942
943 assert(num <= PIPE_MAX_SAMPLERS);
944
945 for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
946 const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;
947
948 if (sampler) {
949 struct lp_jit_sampler *jit_sam;
950 jit_sam = &setup->fs.current.jit_context.samplers[i];
951
952 jit_sam->min_lod = sampler->min_lod;
953 jit_sam->max_lod = sampler->max_lod;
954 jit_sam->lod_bias = sampler->lod_bias;
955 COPY_4V(jit_sam->border_color, sampler->border_color.f);
956 }
957 }
958
959 setup->dirty |= LP_SETUP_NEW_FS;
960 }
961
962
963 /**
964 * Is the given texture referenced by any scene?
965 * Note: we have to check all scenes including any scenes currently
966 * being rendered and the current scene being built.
967 */
968 unsigned
969 lp_setup_is_resource_referenced( const struct lp_setup_context *setup,
970 const struct pipe_resource *texture )
971 {
972 unsigned i;
973
974 /* check the render targets */
975 for (i = 0; i < setup->fb.nr_cbufs; i++) {
976 if (setup->fb.cbufs[i] && setup->fb.cbufs[i]->texture == texture)
977 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
978 }
979 if (setup->fb.zsbuf && setup->fb.zsbuf->texture == texture) {
980 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
981 }
982
983 /* check textures referenced by the scene */
984 for (i = 0; i < Elements(setup->scenes); i++) {
985 if (lp_scene_is_resource_referenced(setup->scenes[i], texture)) {
986 return LP_REFERENCED_FOR_READ;
987 }
988 }
989
990 return LP_UNREFERENCED;
991 }
992
993
994 /**
995 * Called by vbuf code when we're about to draw something.
996 *
997 * This function stores all dirty state in the current scene's display list
998 * memory, via lp_scene_alloc(). We cannot pass pointers of mutable state to
999 * the JIT functions, as the JIT functions will be called later on, most likely
1000 * on a different thread.
1001 *
1002 * When processing dirty state it is imperative that we don't refer to any
1003 * pointers previously allocated with lp_scene_alloc() in this function (or any
1004 * function) as they may belong to a scene freed since then.
1005 */
1006 static boolean
1007 try_update_scene_state( struct lp_setup_context *setup )
1008 {
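   /* Bound in place of a missing constant buffer so the JIT code always sees
    * a valid pointer (num_constants is set to 0 in that case).
    */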
1009 static const float fake_const_buf[4];
1010 boolean new_scene = (setup->fs.stored == NULL);
1011 struct lp_scene *scene = setup->scene;
1012 unsigned i;
1013
1014 assert(scene);
1015
1016 if (setup->dirty & LP_SETUP_NEW_VIEWPORTS) {
1017 /*
1018 * Record new depth range state for changes due to viewport updates.
1019 *
1020 * TODO: Collapse the existing viewport and depth range information
1021 * into one structure, for access by JIT.
1022 */
1023 struct lp_jit_viewport *stored;
1024
1025 stored = (struct lp_jit_viewport *)
1026 lp_scene_alloc(scene, sizeof setup->viewports);
1027
1028 if (!stored) {
1029 assert(!new_scene);
1030 return FALSE;
1031 }
1032
1033 memcpy(stored, setup->viewports, sizeof setup->viewports);
1034
1035 setup->fs.current.jit_context.viewports = stored;
1036 setup->dirty |= LP_SETUP_NEW_FS;
1037 }
1038
1039 if(setup->dirty & LP_SETUP_NEW_BLEND_COLOR) {
1040 uint8_t *stored;
1041 float* fstored;
1042 unsigned i, j;
1043 unsigned size;
1044
1045 /* Alloc u8_blend_color (16 x i8) and f_blend_color (4 or 8 x f32) */
1046 size = 4 * 16 * sizeof(uint8_t);
1047 size += (LP_MAX_VECTOR_LENGTH / 4) * sizeof(float);
1048 stored = lp_scene_alloc_aligned(scene, size, LP_MIN_VECTOR_ALIGN);
1049
1050 if (!stored) {
1051 assert(!new_scene);
1052 return FALSE;
1053 }
1054
1055 /* Store floating point colour */
1056 fstored = (float*)(stored + 4*16);
1057 for (i = 0; i < (LP_MAX_VECTOR_LENGTH / 4); ++i) {
1058 fstored[i] = setup->blend_color.current.color[i % 4];
1059 }
1060
1061 /* smear each blend color component across 16 ubyte elements */
1062 for (i = 0; i < 4; ++i) {
1063 uint8_t c = float_to_ubyte(setup->blend_color.current.color[i]);
1064 for (j = 0; j < 16; ++j)
1065 stored[i*16 + j] = c;
1066 }
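   /* Resulting layout: stored[0..15] hold R repeated, [16..31] G, [32..47] B,
    * [48..63] A, followed by fstored[] repeating R,G,B,A to fill one SoA
    * vector of floats.
    */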
1067
1068 setup->blend_color.stored = stored;
1069 setup->fs.current.jit_context.u8_blend_color = stored;
1070 setup->fs.current.jit_context.f_blend_color = fstored;
1071 setup->dirty |= LP_SETUP_NEW_FS;
1072 }
1073
1074 if (setup->dirty & LP_SETUP_NEW_CONSTANTS) {
1075 for (i = 0; i < Elements(setup->constants); ++i) {
1076 struct pipe_resource *buffer = setup->constants[i].current.buffer;
1077 const unsigned current_size = MIN2(setup->constants[i].current.buffer_size,
1078 LP_MAX_TGSI_CONST_BUFFER_SIZE);
1079 const ubyte *current_data = NULL;
1080 int num_constants;
1081
1082 STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE);
1083
1084 if (buffer) {
1085 /* resource buffer */
1086 current_data = (ubyte *) llvmpipe_resource_data(buffer);
1087 }
1088 else if (setup->constants[i].current.user_buffer) {
1089 /* user-space buffer */
1090 current_data = (ubyte *) setup->constants[i].current.user_buffer;
1091 }
1092
1093 if (current_data) {
1094 current_data += setup->constants[i].current.buffer_offset;
1095
1096 /* TODO: copy only the actually used constants? */
1097
1098 if (setup->constants[i].stored_size != current_size ||
1099 !setup->constants[i].stored_data ||
1100 memcmp(setup->constants[i].stored_data,
1101 current_data,
1102 current_size) != 0) {
1103 void *stored;
1104
1105 stored = lp_scene_alloc(scene, current_size);
1106 if (!stored) {
1107 assert(!new_scene);
1108 return FALSE;
1109 }
1110
1111 memcpy(stored,
1112 current_data,
1113 current_size);
1114 setup->constants[i].stored_size = current_size;
1115 setup->constants[i].stored_data = stored;
1116 }
1117 setup->fs.current.jit_context.constants[i] =
1118 setup->constants[i].stored_data;
1119 }
1120 else {
1121 setup->constants[i].stored_size = 0;
1122 setup->constants[i].stored_data = NULL;
1123 setup->fs.current.jit_context.constants[i] = fake_const_buf;
1124 }
1125
1126 num_constants =
1127 setup->constants[i].stored_size / (sizeof(float) * 4);
1128 setup->fs.current.jit_context.num_constants[i] = num_constants;
1129 setup->dirty |= LP_SETUP_NEW_FS;
1130 }
1131 }
1132
1133
1134 if (setup->dirty & LP_SETUP_NEW_FS) {
1135 if (!setup->fs.stored ||
1136 memcmp(setup->fs.stored,
1137 &setup->fs.current,
1138 sizeof setup->fs.current) != 0)
1139 {
1140 struct lp_rast_state *stored;
1141
1142 /* The fs state that's been stored in the scene is different from
1143 * the new, current state. So allocate a new lp_rast_state object
1144 * and append it to the bin's setup data buffer.
1145 */
1146 stored = (struct lp_rast_state *) lp_scene_alloc(scene, sizeof *stored);
1147 if (!stored) {
1148 assert(!new_scene);
1149 return FALSE;
1150 }
1151
1152 memcpy(stored,
1153 &setup->fs.current,
1154 sizeof setup->fs.current);
1155 setup->fs.stored = stored;
1156
1157 /* The scene now references the textures in the rasterization
1158 * state record. Note that now.
1159 */
1160 for (i = 0; i < Elements(setup->fs.current_tex); i++) {
1161 if (setup->fs.current_tex[i]) {
1162 if (!lp_scene_add_resource_reference(scene,
1163 setup->fs.current_tex[i],
1164 new_scene)) {
1165 assert(!new_scene);
1166 return FALSE;
1167 }
1168 }
1169 }
1170 }
1171 }
1172
1173 if (setup->dirty & LP_SETUP_NEW_SCISSOR) {
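      /* Each per-viewport draw region is the framebuffer rect, intersected
       * with the corresponding scissor rect when the scissor test is enabled.
       */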
1174 unsigned i;
1175 for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
1176 setup->draw_regions[i] = setup->framebuffer;
1177 if (setup->scissor_test) {
1178 u_rect_possible_intersection(&setup->scissors[i],
1179 &setup->draw_regions[i]);
1180 }
1181 }
1182 }
1183
1184 setup->dirty = 0;
1185
1186 assert(setup->fs.stored);
1187 return TRUE;
1188 }
1189
1190 boolean
1191 lp_setup_update_state( struct lp_setup_context *setup,
1192 boolean update_scene )
1193 {
1194 /* Some of the 'draw' pipeline stages may have changed some driver state.
1195 * Make sure we've processed those state changes before anything else.
1196 *
1197 * XXX this is the only place where llvmpipe_context is used in the
1198 * setup code. This may get refactored/changed...
1199 */
1200 {
1201 struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
1202 if (lp->dirty) {
1203 llvmpipe_update_derived(lp);
1204 }
1205
1206 if (lp->setup->dirty) {
1207 llvmpipe_update_setup(lp);
1208 }
1209
1210 assert(setup->setup.variant);
1211
1212 /* Will probably need to move this somewhere else, just need
1213 * to know about vertex shader point size attribute.
1214 */
1215 setup->psize_slot = lp->psize_slot;
1216 setup->viewport_index_slot = lp->viewport_index_slot;
1217 setup->layer_slot = lp->layer_slot;
1218 setup->face_slot = lp->face_slot;
1219
1220 assert(lp->dirty == 0);
1221
1222 assert(lp->setup_variant.key.size ==
1223 setup->setup.variant->key.size);
1224
1225 assert(memcmp(&lp->setup_variant.key,
1226 &setup->setup.variant->key,
1227 setup->setup.variant->key.size) == 0);
1228 }
1229
1230 if (update_scene && setup->state != SETUP_ACTIVE) {
1231 if (!set_scene_state( setup, SETUP_ACTIVE, __FUNCTION__ ))
1232 return FALSE;
1233 }
1234
1235 /* Only call into update_scene_state() if we already have a
1236 * scene:
1237 */
1238 if (update_scene && setup->scene) {
1239 assert(setup->state == SETUP_ACTIVE);
1240
1241 if (try_update_scene_state(setup))
1242 return TRUE;
1243
1244 /* Update failed, try to restart the scene.
1245 *
1246 * Cannot call lp_setup_flush_and_restart() directly here
1247 * because of potential recursion.
1248 */
1249 if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
1250 return FALSE;
1251
1252 if (!set_scene_state(setup, SETUP_ACTIVE, __FUNCTION__))
1253 return FALSE;
1254
1255 if (!setup->scene)
1256 return FALSE;
1257
1258 return try_update_scene_state(setup);
1259 }
1260
1261 return TRUE;
1262 }
1263
1264
1265
1266 /* Only caller is lp_setup_vbuf_destroy()
1267 */
1268 void
1269 lp_setup_destroy( struct lp_setup_context *setup )
1270 {
1271 uint i;
1272
1273 lp_setup_reset( setup );
1274
1275 util_unreference_framebuffer_state(&setup->fb);
1276
1277 for (i = 0; i < Elements(setup->fs.current_tex); i++) {
1278 pipe_resource_reference(&setup->fs.current_tex[i], NULL);
1279 }
1280
1281 for (i = 0; i < Elements(setup->constants); i++) {
1282 pipe_resource_reference(&setup->constants[i].current.buffer, NULL);
1283 }
1284
1285 /* free the scenes in the 'empty' queue */
1286 for (i = 0; i < Elements(setup->scenes); i++) {
1287 struct lp_scene *scene = setup->scenes[i];
1288
1289 if (scene->fence)
1290 lp_fence_wait(scene->fence);
1291
1292 lp_scene_destroy(scene);
1293 }
1294
1295 lp_fence_reference(&setup->last_fence, NULL);
1296
1297 FREE( setup );
1298 }
1299
1300
1301 /**
1302 * Create a new primitive tiling engine. Plug it into the backend of
1303 * the draw module. Currently also creates a rasterizer to use with
1304 * it.
1305 */
1306 struct lp_setup_context *
1307 lp_setup_create( struct pipe_context *pipe,
1308 struct draw_context *draw )
1309 {
1310 struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
1311 struct lp_setup_context *setup;
1312 unsigned i;
1313
1314 setup = CALLOC_STRUCT(lp_setup_context);
1315 if (!setup) {
1316 goto no_setup;
1317 }
1318
1319 lp_setup_init_vbuf(setup);
1320
1321 /* Used only in update_state():
1322 */
1323 setup->pipe = pipe;
1324
1325
1326 setup->num_threads = screen->num_threads;
1327 setup->vbuf = draw_vbuf_stage(draw, &setup->base);
1328 if (!setup->vbuf) {
1329 goto no_vbuf;
1330 }
1331
1332 draw_set_rasterize_stage(draw, setup->vbuf);
1333 draw_set_render(draw, &setup->base);
1334
1335 /* create some empty scenes */
1336 for (i = 0; i < MAX_SCENES; i++) {
1337 setup->scenes[i] = lp_scene_create( pipe );
1338 if (!setup->scenes[i]) {
1339 goto no_scenes;
1340 }
1341 }
1342
1343 setup->triangle = first_triangle;
1344 setup->line = first_line;
1345 setup->point = first_point;
1346
1347 setup->dirty = ~0;
1348
1349 return setup;
1350
1351 no_scenes:
1352 for (i = 0; i < MAX_SCENES; i++) {
1353 if (setup->scenes[i]) {
1354 lp_scene_destroy(setup->scenes[i]);
1355 }
1356 }
1357
1358 setup->vbuf->destroy(setup->vbuf);
1359 no_vbuf:
1360 FREE(setup);
1361 no_setup:
1362 return NULL;
1363 }
1364
1365
1366 /**
1367 * Put a BeginQuery command into all bins.
1368 */
1369 void
1370 lp_setup_begin_query(struct lp_setup_context *setup,
1371 struct llvmpipe_query *pq)
1372 {
1373
1374 set_scene_state(setup, SETUP_ACTIVE, "begin_query");
1375
1376 if (!(pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1377 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1378 pq->type == PIPE_QUERY_PIPELINE_STATISTICS))
1379 return;
1380
1381 /* init the query to its beginning state */
1382 assert(setup->active_binned_queries < LP_MAX_ACTIVE_BINNED_QUERIES);
1383 /* exceeding list size so just ignore the query */
1384 if (setup->active_binned_queries >= LP_MAX_ACTIVE_BINNED_QUERIES) {
1385 return;
1386 }
1387 assert(setup->active_queries[setup->active_binned_queries] == NULL);
1388 setup->active_queries[setup->active_binned_queries] = pq;
1389 setup->active_binned_queries++;
1390
1391 assert(setup->scene);
1392 if (setup->scene) {
1393 if (!lp_scene_bin_everywhere(setup->scene,
1394 LP_RAST_OP_BEGIN_QUERY,
1395 lp_rast_arg_query(pq))) {
1396
1397 if (!lp_setup_flush_and_restart(setup))
1398 return;
1399
1400 if (!lp_scene_bin_everywhere(setup->scene,
1401 LP_RAST_OP_BEGIN_QUERY,
1402 lp_rast_arg_query(pq))) {
1403 return;
1404 }
1405 }
1406 setup->scene->had_queries |= TRUE;
1407 }
1408 }
1409
1410
1411 /**
1412 * Put an EndQuery command into all bins.
1413 */
1414 void
1415 lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq)
1416 {
1417 set_scene_state(setup, SETUP_ACTIVE, "end_query");
1418
1419 assert(setup->scene);
1420 if (setup->scene) {
1421 /* pq->fence should be the fence of the *last* scene which
1422 * contributed to the query result.
1423 */
1424 lp_fence_reference(&pq->fence, setup->scene->fence);
1425
1426 if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1427 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1428 pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
1429 pq->type == PIPE_QUERY_TIMESTAMP) {
1430 if (pq->type == PIPE_QUERY_TIMESTAMP &&
1431 !(setup->scene->tiles_x | setup->scene->tiles_y)) {
1432 /*
1433 * If there's a zero width/height framebuffer, there are no bins and
1434 * hence no rast task is ever run. So fill in something here instead.
1435 */
1436 pq->end[0] = os_time_get_nano();
1437 }
1438
1439 if (!lp_scene_bin_everywhere(setup->scene,
1440 LP_RAST_OP_END_QUERY,
1441 lp_rast_arg_query(pq))) {
1442 if (!lp_setup_flush_and_restart(setup))
1443 goto fail;
1444
1445 if (!lp_scene_bin_everywhere(setup->scene,
1446 LP_RAST_OP_END_QUERY,
1447 lp_rast_arg_query(pq))) {
1448 goto fail;
1449 }
1450 }
1451 setup->scene->had_queries |= TRUE;
1452 }
1453 }
1454 else {
1455 lp_fence_reference(&pq->fence, setup->last_fence);
1456 }
1457
1458 fail:
1459 /* Need to do this now and not earlier, since the query still needs to be
1460 * marked as active in case binning it above caused a flush.
1461 */
1462 if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1463 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1464 pq->type == PIPE_QUERY_PIPELINE_STATISTICS) {
1465 unsigned i;
1466
1467 /* remove from active binned query list */
1468 for (i = 0; i < setup->active_binned_queries; i++) {
1469 if (setup->active_queries[i] == pq)
1470 break;
1471 }
1472 assert(i < setup->active_binned_queries);
1473 if (i == setup->active_binned_queries)
1474 return;
1475 setup->active_binned_queries--;
1476 setup->active_queries[i] = setup->active_queries[setup->active_binned_queries];
1477 setup->active_queries[setup->active_binned_queries] = NULL;
1478 }
1479 }
1480
1481
1482 boolean
1483 lp_setup_flush_and_restart(struct lp_setup_context *setup)
1484 {
1485 if (0) debug_printf("%s\n", __FUNCTION__);
1486
1487 assert(setup->state == SETUP_ACTIVE);
1488
1489 if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
1490 return FALSE;
1491
1492 if (!lp_setup_update_state(setup, TRUE))
1493 return FALSE;
1494
1495 return TRUE;
1496 }
1497
1498