mesa.git: src/gallium/drivers/llvmpipe/lp_setup.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * Tiling engine.
30 *
31 * Builds per-tile display lists and executes them on calls to
32 * lp_setup_flush().
33 */
34
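/*
 * Editor's note (not part of the original source): a minimal sketch of how
 * the llvmpipe context is expected to drive this module, using only entry
 * points defined in this file; the surrounding driver state (pipe, draw,
 * fb_state, color, fence) is assumed:
 *
 *    struct lp_setup_context *setup = lp_setup_create(pipe, draw);
 *    lp_setup_bind_framebuffer(setup, &fb_state);
 *    lp_setup_clear(setup, &color, 1.0, 0,
 *                   PIPE_CLEAR_COLOR0 | PIPE_CLEAR_DEPTHSTENCIL);
 *    ...primitives are binned via the draw/vbuf path...
 *    lp_setup_flush(setup, &fence, __FUNCTION__);
 *    lp_setup_destroy(setup);
 */
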
35 #include <limits.h>
36
37 #include "pipe/p_defines.h"
38 #include "util/u_framebuffer.h"
39 #include "util/u_inlines.h"
40 #include "util/u_memory.h"
41 #include "util/u_pack_color.h"
42 #include "util/u_viewport.h"
43 #include "draw/draw_pipe.h"
44 #include "util/os_time.h"
45 #include "lp_context.h"
46 #include "lp_memory.h"
47 #include "lp_scene.h"
48 #include "lp_texture.h"
49 #include "lp_debug.h"
50 #include "lp_fence.h"
51 #include "lp_query.h"
52 #include "lp_rast.h"
53 #include "lp_setup_context.h"
54 #include "lp_screen.h"
55 #include "lp_state.h"
56 #include "state_tracker/sw_winsys.h"
57
58 #include "draw/draw_context.h"
59 #include "draw/draw_vbuf.h"
60
61
62 static boolean set_scene_state( struct lp_setup_context *, enum setup_state,
63 const char *reason);
64 static boolean try_update_scene_state( struct lp_setup_context *setup );
65
66
67 static void
68 lp_setup_get_empty_scene(struct lp_setup_context *setup)
69 {
70 assert(setup->scene == NULL);
71
72 setup->scene_idx++;
73 setup->scene_idx %= ARRAY_SIZE(setup->scenes);
74
75 setup->scene = setup->scenes[setup->scene_idx];
76
77 if (setup->scene->fence) {
78 if (LP_DEBUG & DEBUG_SETUP)
79 debug_printf("%s: wait for scene %d\n",
80 __FUNCTION__, setup->scene->fence->id);
81
82 lp_fence_wait(setup->scene->fence);
83 }
84
85 lp_scene_begin_binning(setup->scene, &setup->fb);
86
87 }
88
89
90 static void
91 first_triangle( struct lp_setup_context *setup,
92 const float (*v0)[4],
93 const float (*v1)[4],
94 const float (*v2)[4])
95 {
96 assert(setup->state == SETUP_ACTIVE);
97 lp_setup_choose_triangle( setup );
98 setup->triangle( setup, v0, v1, v2 );
99 }
100
101 static void
102 first_line( struct lp_setup_context *setup,
103 const float (*v0)[4],
104 const float (*v1)[4])
105 {
106 assert(setup->state == SETUP_ACTIVE);
107 lp_setup_choose_line( setup );
108 setup->line( setup, v0, v1 );
109 }
110
111 static void
112 first_point( struct lp_setup_context *setup,
113 const float (*v0)[4])
114 {
115 assert(setup->state == SETUP_ACTIVE);
116 lp_setup_choose_point( setup );
117 setup->point( setup, v0 );
118 }
119
120 void lp_setup_reset( struct lp_setup_context *setup )
121 {
122 unsigned i;
123
124 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
125
126 /* Reset derived state */
127 for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
128 setup->constants[i].stored_size = 0;
129 setup->constants[i].stored_data = NULL;
130 }
131 setup->fs.stored = NULL;
132 setup->dirty = ~0;
133
134 /* no current bin */
135 setup->scene = NULL;
136
137 /* Reset some state:
138 */
139 memset(&setup->clear, 0, sizeof setup->clear);
140
141 /* Have an explicit "start-binning" call and get rid of this
142 * pointer twiddling?
143 */
144 setup->line = first_line;
145 setup->point = first_point;
146 setup->triangle = first_triangle;
147 }
148
149
150 /** Rasterize all scene's bins */
151 static void
152 lp_setup_rasterize_scene( struct lp_setup_context *setup )
153 {
154 struct lp_scene *scene = setup->scene;
155 struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);
156
157 scene->num_active_queries = setup->active_binned_queries;
158 memcpy(scene->active_queries, setup->active_queries,
159 scene->num_active_queries * sizeof(scene->active_queries[0]));
160
161 lp_scene_end_binning(scene);
162
163 lp_fence_reference(&setup->last_fence, scene->fence);
164
165 if (setup->last_fence)
166 setup->last_fence->issued = TRUE;
167
168 mtx_lock(&screen->rast_mutex);
169
170 /* FIXME: We enqueue the scene and then wait for the rasterizer to finish.
171 * This means we never actually run any vertex work in parallel with
172 * rasterization (not in the same context at least), which is what having
173 * multiple scenes per setup is for - when we get a new empty scene,
174 * any old one is already empty again because we waited here for the
175 * raster tasks to finish. Ideally, we shouldn't need to wait here
176 * and instead rely on fences elsewhere when waiting is necessary.
177 * Certainly, lp_scene_end_rasterization() would need to be deferred too,
178 * and there are probably other reasons why this doesn't actually work.
179 */
180 lp_rast_queue_scene(screen->rast, scene);
181 lp_rast_finish(screen->rast);
182 mtx_unlock(&screen->rast_mutex);
183
184 lp_scene_end_rasterization(setup->scene);
185 lp_setup_reset( setup );
186
187 LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
188 }
189
190
191
192 static boolean
193 begin_binning( struct lp_setup_context *setup )
194 {
195 struct lp_scene *scene = setup->scene;
196 boolean need_zsload = FALSE;
197 boolean ok;
198
199 assert(scene);
200 assert(scene->fence == NULL);
201
202 /* Always create a fence:
203 */
204 scene->fence = lp_fence_create(MAX2(1, setup->num_threads));
205 if (!scene->fence)
206 return FALSE;
207
208 ok = try_update_scene_state(setup);
209 if (!ok)
210 return FALSE;
211
212 if (setup->fb.zsbuf &&
213 ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
214 util_format_is_depth_and_stencil(setup->fb.zsbuf->format))
215 need_zsload = TRUE;
216
217 LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__,
218 setup->clear.flags >> 2,
219 need_zsload ? "clear": "load");
220
221 if (setup->clear.flags & PIPE_CLEAR_COLOR) {
222 unsigned cbuf;
223 for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) {
224 assert(PIPE_CLEAR_COLOR0 == 1 << 2);
225 if (setup->clear.flags & (1 << (2 + cbuf))) {
226 union lp_rast_cmd_arg clearrb_arg;
227 struct lp_rast_clear_rb *cc_scene =
228 (struct lp_rast_clear_rb *)
229 lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb));
230
231 if (!cc_scene) {
232 return FALSE;
233 }
234
235 cc_scene->cbuf = cbuf;
236 cc_scene->color_val = setup->clear.color_val[cbuf];
237 clearrb_arg.clear_rb = cc_scene;
238
239 if (!lp_scene_bin_everywhere(scene,
240 LP_RAST_OP_CLEAR_COLOR,
241 clearrb_arg))
242 return FALSE;
243 }
244 }
245 }
246
247 if (setup->fb.zsbuf) {
248 if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) {
249 ok = lp_scene_bin_everywhere( scene,
250 LP_RAST_OP_CLEAR_ZSTENCIL,
251 lp_rast_arg_clearzs(
252 setup->clear.zsvalue,
253 setup->clear.zsmask));
254 if (!ok)
255 return FALSE;
256 }
257 }
258
259 setup->clear.flags = 0;
260 setup->clear.zsmask = 0;
261 setup->clear.zsvalue = 0;
262
263 scene->had_queries = !!setup->active_binned_queries;
264
265 LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
266 return TRUE;
267 }
268
269
270 /* This basically bins and then flushes any outstanding full-screen
271 * clears.
272 *
273 * TODO: fast path for fullscreen clears and no triangles.
274 */
275 static boolean
276 execute_clears( struct lp_setup_context *setup )
277 {
278 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
279
280 return begin_binning( setup );
281 }
282
283 const char *states[] = {
284 "FLUSHED",
285 "CLEARED",
286 "ACTIVE "
287 };
288
289
290 static boolean
291 set_scene_state( struct lp_setup_context *setup,
292 enum setup_state new_state,
293 const char *reason)
294 {
295 unsigned old_state = setup->state;
296
297 if (old_state == new_state)
298 return TRUE;
299
300 if (LP_DEBUG & DEBUG_SCENE) {
301 debug_printf("%s old %s new %s%s%s\n",
302 __FUNCTION__,
303 states[old_state],
304 states[new_state],
305 (new_state == SETUP_FLUSHED) ? ": " : "",
306 (new_state == SETUP_FLUSHED) ? reason : "");
307
308 if (new_state == SETUP_FLUSHED && setup->scene)
309 lp_debug_draw_bins_by_cmd_length(setup->scene);
310 }
311
312 /* wait for a free/empty scene
313 */
314 if (old_state == SETUP_FLUSHED)
315 lp_setup_get_empty_scene(setup);
316
317 switch (new_state) {
318 case SETUP_CLEARED:
319 break;
320
321 case SETUP_ACTIVE:
322 if (!begin_binning( setup ))
323 goto fail;
324 break;
325
326 case SETUP_FLUSHED:
327 if (old_state == SETUP_CLEARED)
328 if (!execute_clears( setup ))
329 goto fail;
330
331 lp_setup_rasterize_scene( setup );
332 assert(setup->scene == NULL);
333 break;
334
335 default:
336 assert(0 && "invalid setup state mode");
337 goto fail;
338 }
339
340 setup->state = new_state;
341 return TRUE;
342
343 fail:
344 if (setup->scene) {
345 lp_scene_end_rasterization(setup->scene);
346 setup->scene = NULL;
347 }
348
349 setup->state = SETUP_FLUSHED;
350 lp_setup_reset( setup );
351 return FALSE;
352 }
353
354
355 void
356 lp_setup_flush( struct lp_setup_context *setup,
357 struct pipe_fence_handle **fence,
358 const char *reason)
359 {
360 set_scene_state( setup, SETUP_FLUSHED, reason );
361
362 if (fence) {
363 lp_fence_reference((struct lp_fence **)fence, setup->last_fence);
364 if (!*fence)
365 *fence = (struct pipe_fence_handle *)lp_fence_create(0);
366 }
367 }
368
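/*
 * Editor's sketch (not part of the original file): how a caller might pair
 * lp_setup_flush() with the fence it returns in order to wait for the
 * rasterizer to finish the scene.  Kept under #if 0 since it is purely
 * illustrative; the example_* name is made up.
 */
#if 0
static void
example_flush_and_wait(struct lp_setup_context *setup)
{
   struct pipe_fence_handle *fence = NULL;

   /* Rasterize whatever has been binned so far and get a fence for it. */
   lp_setup_flush(setup, &fence, __FUNCTION__);

   /* Block until the rasterizer threads have completed the scene. */
   if (fence) {
      lp_fence_wait((struct lp_fence *)fence);
      lp_fence_reference((struct lp_fence **)&fence, NULL);
   }
}
#endif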
369
370 void
371 lp_setup_bind_framebuffer( struct lp_setup_context *setup,
372 const struct pipe_framebuffer_state *fb )
373 {
374 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
375
376 /* Flush any old scene.
377 */
378 set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );
379
380 /*
381 * Ensure the old scene is not reused.
382 */
383 assert(!setup->scene);
384
385 /* Set new state. This will be picked up later when we next need a
386 * scene.
387 */
388 util_copy_framebuffer_state(&setup->fb, fb);
389 setup->framebuffer.x0 = 0;
390 setup->framebuffer.y0 = 0;
391 setup->framebuffer.x1 = fb->width-1;
392 setup->framebuffer.y1 = fb->height-1;
393 setup->dirty |= LP_SETUP_NEW_SCISSOR;
394 }
395
396
397 /*
398 * Try to clear one color buffer of the attached fb, either by binning a clear
399 * command or queuing up the clear for later (when binning is started).
400 */
401 static boolean
402 lp_setup_try_clear_color_buffer(struct lp_setup_context *setup,
403 const union pipe_color_union *color,
404 unsigned cbuf)
405 {
406 union lp_rast_cmd_arg clearrb_arg;
407 union util_color uc;
408 enum pipe_format format = setup->fb.cbufs[cbuf]->format;
409
410 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);
411
412 if (util_format_is_pure_integer(format)) {
413 /*
414 * We expect int/uint clear values here, though some APIs
415 * might disagree (but in any case util_pack_color()
416 * couldn't handle it)...
417 */
418 if (util_format_is_pure_sint(format)) {
419 util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1);
420 }
421 else {
422 assert(util_format_is_pure_uint(format));
423 util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
424 }
425 }
426 else {
427 util_pack_color(color->f, format, &uc);
428 }
429
430 if (setup->state == SETUP_ACTIVE) {
431 struct lp_scene *scene = setup->scene;
432
433 /* Add the clear to the existing scene. In the unusual case where
434 * both color and depth-stencil are being cleared when there has
435 * already been some rendering, we could discard the currently
436 * binned scene and start again, but I don't see that as a
437 * common usage.
438 */
439 struct lp_rast_clear_rb *cc_scene =
440 (struct lp_rast_clear_rb *)
441 lp_scene_alloc_aligned(scene, sizeof(struct lp_rast_clear_rb), 8);
442
443 if (!cc_scene) {
444 return FALSE;
445 }
446
447 cc_scene->cbuf = cbuf;
448 cc_scene->color_val = uc;
449 clearrb_arg.clear_rb = cc_scene;
450
451 if (!lp_scene_bin_everywhere(scene,
452 LP_RAST_OP_CLEAR_COLOR,
453 clearrb_arg))
454 return FALSE;
455 }
456 else {
457 /* Put ourselves into the 'pre-clear' state, specifically to try
458 * and accumulate multiple clears to color and depth_stencil
459 * buffers which the app or state-tracker might issue
460 * separately.
461 */
462 set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );
463
464 assert(PIPE_CLEAR_COLOR0 == (1 << 2));
465 setup->clear.flags |= 1 << (cbuf + 2);
466 setup->clear.color_val[cbuf] = uc;
467 }
468
469 return TRUE;
470 }
471
472 static boolean
473 lp_setup_try_clear_zs(struct lp_setup_context *setup,
474 double depth,
475 unsigned stencil,
476 unsigned flags)
477 {
478 uint64_t zsmask = 0;
479 uint64_t zsvalue = 0;
480 uint32_t zmask32;
481 uint8_t smask8;
482 enum pipe_format format = setup->fb.zsbuf->format;
483
484 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);
485
486 zmask32 = (flags & PIPE_CLEAR_DEPTH) ? ~0 : 0;
487 smask8 = (flags & PIPE_CLEAR_STENCIL) ? ~0 : 0;
488
489 zsvalue = util_pack64_z_stencil(format, depth, stencil);
490
491 zsmask = util_pack64_mask_z_stencil(format, zmask32, smask8);
492
493 zsvalue &= zsmask;
494
495 if (format == PIPE_FORMAT_Z24X8_UNORM ||
496 format == PIPE_FORMAT_X8Z24_UNORM) {
497 /*
498 * Make a full mask if there are "X" bits so we can do a full
499 * clear (without a read-modify-write).
500 */
501 uint32_t zsmask_full = 0;
502 zsmask_full = util_pack_mask_z_stencil(format, ~0, ~0);
503 zsmask |= ~zsmask_full;
504 }
505
506 if (setup->state == SETUP_ACTIVE) {
507 struct lp_scene *scene = setup->scene;
508
509 /* Add the clear to the existing scene. In the unusual case where
510 * both color and depth-stencil are being cleared when there has
511 * already been some rendering, we could discard the currently
512 * binned scene and start again, but I don't see that as a
513 * common usage.
514 */
515 if (!lp_scene_bin_everywhere(scene,
516 LP_RAST_OP_CLEAR_ZSTENCIL,
517 lp_rast_arg_clearzs(zsvalue, zsmask)))
518 return FALSE;
519 }
520 else {
521 /* Put ourselves into the 'pre-clear' state, specifically to try
522 * and accumulate multiple clears to color and depth_stencil
523 * buffers which the app or state-tracker might issue
524 * separately.
525 */
526 set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );
527
528 setup->clear.flags |= flags;
529
530 setup->clear.zsmask |= zsmask;
531 setup->clear.zsvalue =
532 (setup->clear.zsvalue & ~zsmask) | (zsvalue & zsmask);
533 }
534
535 return TRUE;
536 }
537
538 void
539 lp_setup_clear( struct lp_setup_context *setup,
540 const union pipe_color_union *color,
541 double depth,
542 unsigned stencil,
543 unsigned flags )
544 {
545 unsigned i;
546
547 /*
548 * Note that any of these (at most 9) clears could fail (but at most one
549 * of them should actually fail). Handling them separately avoids redoing
550 * the clears that already succeeded (we still clear tiles twice if a
551 * clear command succeeded only partially for one buffer).
552 */
553 if (flags & PIPE_CLEAR_DEPTHSTENCIL) {
554 unsigned flagszs = flags & PIPE_CLEAR_DEPTHSTENCIL;
555 if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) {
556 lp_setup_flush(setup, NULL, __FUNCTION__);
557
558 if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs))
559 assert(0);
560 }
561 }
562
563 if (flags & PIPE_CLEAR_COLOR) {
564 assert(PIPE_CLEAR_COLOR0 == (1 << 2));
565 for (i = 0; i < setup->fb.nr_cbufs; i++) {
566 if ((flags & (1 << (2 + i))) && setup->fb.cbufs[i]) {
567 if (!lp_setup_try_clear_color_buffer(setup, color, i)) {
568 lp_setup_flush(setup, NULL, __FUNCTION__);
569
570 if (!lp_setup_try_clear_color_buffer(setup, color, i))
571 assert(0);
572 }
573 }
574 }
575 }
576 }
577
578
579
580 void
581 lp_setup_set_triangle_state( struct lp_setup_context *setup,
582 unsigned cull_mode,
583 boolean ccw_is_frontface,
584 boolean scissor,
585 boolean half_pixel_center,
586 boolean bottom_edge_rule)
587 {
588 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
589
590 setup->ccw_is_frontface = ccw_is_frontface;
591 setup->cullmode = cull_mode;
592 setup->triangle = first_triangle;
593 setup->pixel_offset = half_pixel_center ? 0.5f : 0.0f;
594 setup->bottom_edge_rule = bottom_edge_rule;
595
596 if (setup->scissor_test != scissor) {
597 setup->dirty |= LP_SETUP_NEW_SCISSOR;
598 setup->scissor_test = scissor;
599 }
600 }
601
602 void
603 lp_setup_set_line_state( struct lp_setup_context *setup,
604 float line_width)
605 {
606 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
607
608 setup->line_width = line_width;
609 }
610
611 void
612 lp_setup_set_point_state( struct lp_setup_context *setup,
613 float point_size,
614 boolean point_size_per_vertex,
615 uint sprite_coord_enable,
616 uint sprite_coord_origin)
617 {
618 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
619
620 setup->point_size = point_size;
621 setup->sprite_coord_enable = sprite_coord_enable;
622 setup->sprite_coord_origin = sprite_coord_origin;
623 setup->point_size_per_vertex = point_size_per_vertex;
624 }
625
626 void
627 lp_setup_set_setup_variant( struct lp_setup_context *setup,
628 const struct lp_setup_variant *variant)
629 {
630 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
631
632 setup->setup.variant = variant;
633 }
634
635 void
636 lp_setup_set_fs_variant( struct lp_setup_context *setup,
637 struct lp_fragment_shader_variant *variant)
638 {
639 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__,
640 variant);
641 /* FIXME: reference count */
642
643 setup->fs.current.variant = variant;
644 setup->dirty |= LP_SETUP_NEW_FS;
645 }
646
647 void
648 lp_setup_set_fs_constants(struct lp_setup_context *setup,
649 unsigned num,
650 struct pipe_constant_buffer *buffers)
651 {
652 unsigned i;
653
654 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);
655
656 assert(num <= ARRAY_SIZE(setup->constants));
657
658 for (i = 0; i < num; ++i) {
659 util_copy_constant_buffer(&setup->constants[i].current, &buffers[i]);
660 }
661 for (; i < ARRAY_SIZE(setup->constants); i++) {
662 util_copy_constant_buffer(&setup->constants[i].current, NULL);
663 }
664 setup->dirty |= LP_SETUP_NEW_CONSTANTS;
665 }
666
667 void
668 lp_setup_set_fs_ssbos(struct lp_setup_context *setup,
669 unsigned num,
670 struct pipe_shader_buffer *buffers)
671 {
672 unsigned i;
673
674 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);
675
676 assert(num <= ARRAY_SIZE(setup->ssbos));
677
678 for (i = 0; i < num; ++i) {
679 util_copy_shader_buffer(&setup->ssbos[i].current, &buffers[i]);
680 }
681 for (; i < ARRAY_SIZE(setup->ssbos); i++) {
682 util_copy_shader_buffer(&setup->ssbos[i].current, NULL);
683 }
684 setup->dirty |= LP_SETUP_NEW_SSBOS;
685 }
686
687
688 void
689 lp_setup_set_alpha_ref_value( struct lp_setup_context *setup,
690 float alpha_ref_value )
691 {
692 LP_DBG(DEBUG_SETUP, "%s %f\n", __FUNCTION__, alpha_ref_value);
693
694 if(setup->fs.current.jit_context.alpha_ref_value != alpha_ref_value) {
695 setup->fs.current.jit_context.alpha_ref_value = alpha_ref_value;
696 setup->dirty |= LP_SETUP_NEW_FS;
697 }
698 }
699
700 void
701 lp_setup_set_stencil_ref_values( struct lp_setup_context *setup,
702 const ubyte refs[2] )
703 {
704 LP_DBG(DEBUG_SETUP, "%s %d %d\n", __FUNCTION__, refs[0], refs[1]);
705
706 if (setup->fs.current.jit_context.stencil_ref_front != refs[0] ||
707 setup->fs.current.jit_context.stencil_ref_back != refs[1]) {
708 setup->fs.current.jit_context.stencil_ref_front = refs[0];
709 setup->fs.current.jit_context.stencil_ref_back = refs[1];
710 setup->dirty |= LP_SETUP_NEW_FS;
711 }
712 }
713
714 void
715 lp_setup_set_blend_color( struct lp_setup_context *setup,
716 const struct pipe_blend_color *blend_color )
717 {
718 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
719
720 assert(blend_color);
721
722 if(memcmp(&setup->blend_color.current, blend_color, sizeof *blend_color) != 0) {
723 memcpy(&setup->blend_color.current, blend_color, sizeof *blend_color);
724 setup->dirty |= LP_SETUP_NEW_BLEND_COLOR;
725 }
726 }
727
728
729 void
730 lp_setup_set_scissors( struct lp_setup_context *setup,
731 const struct pipe_scissor_state *scissors )
732 {
733 unsigned i;
734 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
735
736 assert(scissors);
737
738 for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
739 setup->scissors[i].x0 = scissors[i].minx;
740 setup->scissors[i].x1 = scissors[i].maxx-1;
741 setup->scissors[i].y0 = scissors[i].miny;
742 setup->scissors[i].y1 = scissors[i].maxy-1;
743 }
744 setup->dirty |= LP_SETUP_NEW_SCISSOR;
745 }
746
747
748 void
749 lp_setup_set_flatshade_first(struct lp_setup_context *setup,
750 boolean flatshade_first)
751 {
752 setup->flatshade_first = flatshade_first;
753 }
754
755 void
756 lp_setup_set_rasterizer_discard(struct lp_setup_context *setup,
757 boolean rasterizer_discard)
758 {
759 if (setup->rasterizer_discard != rasterizer_discard) {
760 setup->rasterizer_discard = rasterizer_discard;
761 setup->line = first_line;
762 setup->point = first_point;
763 setup->triangle = first_triangle;
764 }
765 }
766
767 void
768 lp_setup_set_vertex_info(struct lp_setup_context *setup,
769 struct vertex_info *vertex_info)
770 {
771 /* XXX: just silently holding onto the pointer:
772 */
773 setup->vertex_info = vertex_info;
774 }
775
776
777 /**
778 * Called during state validation when LP_NEW_VIEWPORT is set.
779 */
780 void
781 lp_setup_set_viewports(struct lp_setup_context *setup,
782 unsigned num_viewports,
783 const struct pipe_viewport_state *viewports)
784 {
785 struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
786 unsigned i;
787
788 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
789
790 assert(num_viewports <= PIPE_MAX_VIEWPORTS);
791 assert(viewports);
792
793 /*
794 * For use in lp_state_fs.c, propagate the viewport values for all viewports.
795 */
796 for (i = 0; i < num_viewports; i++) {
797 float min_depth;
798 float max_depth;
799 util_viewport_zmin_zmax(&viewports[i], lp->rasterizer->clip_halfz,
800 &min_depth, &max_depth);
801
802 if (setup->viewports[i].min_depth != min_depth ||
803 setup->viewports[i].max_depth != max_depth) {
804 setup->viewports[i].min_depth = min_depth;
805 setup->viewports[i].max_depth = max_depth;
806 setup->dirty |= LP_SETUP_NEW_VIEWPORTS;
807 }
808 }
809 }
810
811
812 /**
813 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
814 */
815 void
816 lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
817 unsigned num,
818 struct pipe_sampler_view **views)
819 {
820 unsigned i, max_tex_num;
821
822 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
823
824 assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
825
826 max_tex_num = MAX2(num, setup->fs.current_tex_num);
827
828 for (i = 0; i < max_tex_num; i++) {
829 struct pipe_sampler_view *view = i < num ? views[i] : NULL;
830
831 if (view) {
832 struct pipe_resource *res = view->texture;
833 struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
834 struct lp_jit_texture *jit_tex;
835 jit_tex = &setup->fs.current.jit_context.textures[i];
836
837 /* We're referencing the texture's internal data, so save a
838 * reference to it.
839 */
840 pipe_resource_reference(&setup->fs.current_tex[i], res);
841
842 if (!lp_tex->dt) {
843 /* regular texture - setup array of mipmap level offsets */
844 int j;
845 unsigned first_level = 0;
846 unsigned last_level = 0;
847
848 if (llvmpipe_resource_is_texture(res)) {
849 first_level = view->u.tex.first_level;
850 last_level = view->u.tex.last_level;
851 assert(first_level <= last_level);
852 assert(last_level <= res->last_level);
853 jit_tex->base = lp_tex->tex_data;
854 }
855 else {
856 jit_tex->base = lp_tex->data;
857 }
858
859 if (LP_PERF & PERF_TEX_MEM) {
860 /* use dummy tile memory */
861 jit_tex->base = lp_dummy_tile;
862 jit_tex->width = TILE_SIZE/8;
863 jit_tex->height = TILE_SIZE/8;
864 jit_tex->depth = 1;
865 jit_tex->first_level = 0;
866 jit_tex->last_level = 0;
867 jit_tex->mip_offsets[0] = 0;
868 jit_tex->row_stride[0] = 0;
869 jit_tex->img_stride[0] = 0;
870 }
871 else {
872 jit_tex->width = res->width0;
873 jit_tex->height = res->height0;
874 jit_tex->depth = res->depth0;
875 jit_tex->first_level = first_level;
876 jit_tex->last_level = last_level;
877
878 if (llvmpipe_resource_is_texture(res)) {
879 for (j = first_level; j <= last_level; j++) {
880 jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
881 jit_tex->row_stride[j] = lp_tex->row_stride[j];
882 jit_tex->img_stride[j] = lp_tex->img_stride[j];
883 }
884
885 if (res->target == PIPE_TEXTURE_1D_ARRAY ||
886 res->target == PIPE_TEXTURE_2D_ARRAY ||
887 res->target == PIPE_TEXTURE_CUBE ||
888 res->target == PIPE_TEXTURE_CUBE_ARRAY) {
889 /*
890 * For array textures, we don't have a first_layer field; instead we
891 * adjust last_layer (stored as depth) plus the mip level offsets
892 * (since the layout is mip-first, we can't just adjust the base pointer).
893 * XXX For mip levels, we could do something similar.
894 */
895 jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
896 for (j = first_level; j <= last_level; j++) {
897 jit_tex->mip_offsets[j] += view->u.tex.first_layer *
898 lp_tex->img_stride[j];
899 }
900 if (view->target == PIPE_TEXTURE_CUBE ||
901 view->target == PIPE_TEXTURE_CUBE_ARRAY) {
902 assert(jit_tex->depth % 6 == 0);
903 }
904 assert(view->u.tex.first_layer <= view->u.tex.last_layer);
905 assert(view->u.tex.last_layer < res->array_size);
906 }
907 }
908 else {
909 /*
910 * For buffers, we don't have "offset", instead adjust
911 * the size (stored as width) plus the base pointer.
912 */
913 unsigned view_blocksize = util_format_get_blocksize(view->format);
914 /* probably don't really need to fill that out */
915 jit_tex->mip_offsets[0] = 0;
916 jit_tex->row_stride[0] = 0;
917 jit_tex->img_stride[0] = 0;
918
919 /* everything specified in number of elements here. */
920 jit_tex->width = view->u.buf.size / view_blocksize;
921 jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
922 /* XXX Unsure if we need to sanitize parameters? */
923 assert(view->u.buf.offset + view->u.buf.size <= res->width0);
924 }
925 }
926 }
927 else {
928 /* display target texture/surface */
929 /*
930 * XXX: Where should this be unmapped?
931 */
932 struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
933 struct sw_winsys *winsys = screen->winsys;
934 jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
935 PIPE_TRANSFER_READ);
936 jit_tex->row_stride[0] = lp_tex->row_stride[0];
937 jit_tex->img_stride[0] = lp_tex->img_stride[0];
938 jit_tex->mip_offsets[0] = 0;
939 jit_tex->width = res->width0;
940 jit_tex->height = res->height0;
941 jit_tex->depth = res->depth0;
942 jit_tex->first_level = jit_tex->last_level = 0;
943 assert(jit_tex->base);
944 }
945 }
946 else {
947 pipe_resource_reference(&setup->fs.current_tex[i], NULL);
948 }
949 }
950 setup->fs.current_tex_num = num;
951
952 setup->dirty |= LP_SETUP_NEW_FS;
953 }
954
955
956 /**
957 * Called during state validation when LP_NEW_SAMPLER is set.
958 */
959 void
960 lp_setup_set_fragment_sampler_state(struct lp_setup_context *setup,
961 unsigned num,
962 struct pipe_sampler_state **samplers)
963 {
964 unsigned i;
965
966 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
967
968 assert(num <= PIPE_MAX_SAMPLERS);
969
970 for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
971 const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;
972
973 if (sampler) {
974 struct lp_jit_sampler *jit_sam;
975 jit_sam = &setup->fs.current.jit_context.samplers[i];
976
977 jit_sam->min_lod = sampler->min_lod;
978 jit_sam->max_lod = sampler->max_lod;
979 jit_sam->lod_bias = sampler->lod_bias;
980 COPY_4V(jit_sam->border_color, sampler->border_color.f);
981 }
982 }
983
984 setup->dirty |= LP_SETUP_NEW_FS;
985 }
986
987
988 /**
989 * Is the given texture referenced by any scene?
990 * Note: we have to check all scenes including any scenes currently
991 * being rendered and the current scene being built.
992 */
993 unsigned
994 lp_setup_is_resource_referenced( const struct lp_setup_context *setup,
995 const struct pipe_resource *texture )
996 {
997 unsigned i;
998
999 /* check the render targets */
1000 for (i = 0; i < setup->fb.nr_cbufs; i++) {
1001 if (setup->fb.cbufs[i] && setup->fb.cbufs[i]->texture == texture)
1002 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
1003 }
1004 if (setup->fb.zsbuf && setup->fb.zsbuf->texture == texture) {
1005 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
1006 }
1007
1008 /* check textures referenced by the scene */
1009 for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
1010 if (lp_scene_is_resource_referenced(setup->scenes[i], texture)) {
1011 return LP_REFERENCED_FOR_READ;
1012 }
1013 }
1014
1015 for (i = 0; i < ARRAY_SIZE(setup->ssbos); i++) {
1016 if (setup->ssbos[i].current.buffer == texture)
1017 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
1018 }
1019
1020 return LP_UNREFERENCED;
1021 }
1022
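/*
 * Editor's sketch (not part of the original file): the typical way a caller
 * (e.g. the resource-mapping code, assumed here) uses the result above to
 * decide whether a flush is needed before CPU access:
 *
 *    unsigned ref = lp_setup_is_resource_referenced(setup, resource);
 *    if (ref & LP_REFERENCED_FOR_WRITE)
 *       lp_setup_flush(setup, NULL, __FUNCTION__);
 */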
1023
1024 /**
1025 * Called by vbuf code when we're about to draw something.
1026 *
1027 * This function stores all dirty state in the current scene's display list
1028 * memory, via lp_scene_alloc(). We can not pass pointers of mutable state to
1029 * the JIT functions, as the JIT functions will be called later on, most likely
1030 * on a different thread.
1031 *
1032 * When processing dirty state it is imperative that we don't refer to any
1033 * pointers previously allocated with lp_scene_alloc() in this function (or any
1034 * function) as they may belong to a scene freed since then.
1035 */
1036 static boolean
1037 try_update_scene_state( struct lp_setup_context *setup )
1038 {
1039 static const float fake_const_buf[4];
1040 boolean new_scene = (setup->fs.stored == NULL);
1041 struct lp_scene *scene = setup->scene;
1042 unsigned i;
1043
1044 assert(scene);
1045
1046 if (setup->dirty & LP_SETUP_NEW_VIEWPORTS) {
1047 /*
1048 * Record new depth range state for changes due to viewport updates.
1049 *
1050 * TODO: Collapse the existing viewport and depth range information
1051 * into one structure, for access by JIT.
1052 */
1053 struct lp_jit_viewport *stored;
1054
1055 stored = (struct lp_jit_viewport *)
1056 lp_scene_alloc(scene, sizeof setup->viewports);
1057
1058 if (!stored) {
1059 assert(!new_scene);
1060 return FALSE;
1061 }
1062
1063 memcpy(stored, setup->viewports, sizeof setup->viewports);
1064
1065 setup->fs.current.jit_context.viewports = stored;
1066 setup->dirty |= LP_SETUP_NEW_FS;
1067 }
1068
1069 if(setup->dirty & LP_SETUP_NEW_BLEND_COLOR) {
1070 uint8_t *stored;
1071 float* fstored;
1072 unsigned i, j;
1073 unsigned size;
1074
1075 /* Alloc u8_blend_color (16 x i8) and f_blend_color (4 or 8 x f32) */
1076 size = 4 * 16 * sizeof(uint8_t);
1077 size += (LP_MAX_VECTOR_LENGTH / 4) * sizeof(float);
1078 stored = lp_scene_alloc_aligned(scene, size, LP_MIN_VECTOR_ALIGN);
1079
1080 if (!stored) {
1081 assert(!new_scene);
1082 return FALSE;
1083 }
1084
1085 /* Store floating point colour */
1086 fstored = (float*)(stored + 4*16);
1087 for (i = 0; i < (LP_MAX_VECTOR_LENGTH / 4); ++i) {
1088 fstored[i] = setup->blend_color.current.color[i % 4];
1089 }
1090
1091 /* smear each blend color component across 16 ubyte elements */
1092 for (i = 0; i < 4; ++i) {
1093 uint8_t c = float_to_ubyte(setup->blend_color.current.color[i]);
1094 for (j = 0; j < 16; ++j)
1095 stored[i*16 + j] = c;
1096 }
1097
1098 setup->blend_color.stored = stored;
1099 setup->fs.current.jit_context.u8_blend_color = stored;
1100 setup->fs.current.jit_context.f_blend_color = fstored;
1101 setup->dirty |= LP_SETUP_NEW_FS;
1102 }
1103
1104 if (setup->dirty & LP_SETUP_NEW_CONSTANTS) {
1105 for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
1106 struct pipe_resource *buffer = setup->constants[i].current.buffer;
1107 const unsigned current_size = MIN2(setup->constants[i].current.buffer_size,
1108 LP_MAX_TGSI_CONST_BUFFER_SIZE);
1109 const ubyte *current_data = NULL;
1110 int num_constants;
1111
1112 STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE);
1113
1114 if (buffer) {
1115 /* resource buffer */
1116 current_data = (ubyte *) llvmpipe_resource_data(buffer);
1117 }
1118 else if (setup->constants[i].current.user_buffer) {
1119 /* user-space buffer */
1120 current_data = (ubyte *) setup->constants[i].current.user_buffer;
1121 }
1122
1123 if (current_data) {
1124 current_data += setup->constants[i].current.buffer_offset;
1125
1126 /* TODO: copy only the actually used constants? */
1127
1128 if (setup->constants[i].stored_size != current_size ||
1129 !setup->constants[i].stored_data ||
1130 memcmp(setup->constants[i].stored_data,
1131 current_data,
1132 current_size) != 0) {
1133 void *stored;
1134
1135 stored = lp_scene_alloc(scene, current_size);
1136 if (!stored) {
1137 assert(!new_scene);
1138 return FALSE;
1139 }
1140
1141 memcpy(stored,
1142 current_data,
1143 current_size);
1144 setup->constants[i].stored_size = current_size;
1145 setup->constants[i].stored_data = stored;
1146 }
1147 setup->fs.current.jit_context.constants[i] =
1148 setup->constants[i].stored_data;
1149 }
1150 else {
1151 setup->constants[i].stored_size = 0;
1152 setup->constants[i].stored_data = NULL;
1153 setup->fs.current.jit_context.constants[i] = fake_const_buf;
1154 }
1155
1156 num_constants =
1157 setup->constants[i].stored_size / (sizeof(float) * 4);
1158 setup->fs.current.jit_context.num_constants[i] = num_constants;
1159 setup->dirty |= LP_SETUP_NEW_FS;
1160 }
1161 }
1162
1163 if (setup->dirty & LP_SETUP_NEW_SSBOS) {
1164 for (i = 0; i < ARRAY_SIZE(setup->ssbos); ++i) {
1165 struct pipe_resource *buffer = setup->ssbos[i].current.buffer;
1166 const ubyte *current_data = NULL;
1167
1168 if (!buffer)
1169 continue;
1170 /* resource buffer */
1171 current_data = (ubyte *) llvmpipe_resource_data(buffer);
1172 if (current_data) {
1173 current_data += setup->ssbos[i].current.buffer_offset;
1174
1175 setup->fs.current.jit_context.ssbos[i] = (const uint32_t *)current_data;
1176 setup->fs.current.jit_context.num_ssbos[i] = setup->ssbos[i].current.buffer_size;
1177 } else {
1178 setup->fs.current.jit_context.ssbos[i] = NULL;
1179 setup->fs.current.jit_context.num_ssbos[i] = 0;
1180 }
1181 setup->dirty |= LP_SETUP_NEW_FS;
1182 }
1183 }
1184 if (setup->dirty & LP_SETUP_NEW_FS) {
1185 if (!setup->fs.stored ||
1186 memcmp(setup->fs.stored,
1187 &setup->fs.current,
1188 sizeof setup->fs.current) != 0)
1189 {
1190 struct lp_rast_state *stored;
1191
1192 /* The fs state that's been stored in the scene is different from
1193 * the new, current state. So allocate a new lp_rast_state object
1194 * and append it to the bin's setup data buffer.
1195 */
1196 stored = (struct lp_rast_state *) lp_scene_alloc(scene, sizeof *stored);
1197 if (!stored) {
1198 assert(!new_scene);
1199 return FALSE;
1200 }
1201
1202 memcpy(stored,
1203 &setup->fs.current,
1204 sizeof setup->fs.current);
1205 setup->fs.stored = stored;
1206
1207 /* The scene now references the textures in the rasterization
1208 * state record. Record those references now.
1209 */
1210 for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
1211 if (setup->fs.current_tex[i]) {
1212 if (!lp_scene_add_resource_reference(scene,
1213 setup->fs.current_tex[i],
1214 new_scene)) {
1215 assert(!new_scene);
1216 return FALSE;
1217 }
1218 }
1219 }
1220 }
1221 }
1222
1223 if (setup->dirty & LP_SETUP_NEW_SCISSOR) {
1224 unsigned i;
1225 for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
1226 setup->draw_regions[i] = setup->framebuffer;
1227 if (setup->scissor_test) {
1228 u_rect_possible_intersection(&setup->scissors[i],
1229 &setup->draw_regions[i]);
1230 }
1231 }
1232 }
1233
1234 setup->dirty = 0;
1235
1236 assert(setup->fs.stored);
1237 return TRUE;
1238 }
1239
1240 boolean
1241 lp_setup_update_state( struct lp_setup_context *setup,
1242 boolean update_scene )
1243 {
1244 /* Some of the 'draw' pipeline stages may have changed some driver state.
1245 * Make sure we've processed those state changes before anything else.
1246 *
1247 * XXX this is the only place where llvmpipe_context is used in the
1248 * setup code. This may get refactored/changed...
1249 */
1250 {
1251 struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
1252 if (lp->dirty) {
1253 llvmpipe_update_derived(lp);
1254 }
1255
1256 if (lp->setup->dirty) {
1257 llvmpipe_update_setup(lp);
1258 }
1259
1260 assert(setup->setup.variant);
1261
1262 /* Will probably need to move this somewhere else; we just need
1263 * to know about the vertex shader point size attribute.
1264 */
1265 setup->psize_slot = lp->psize_slot;
1266 setup->viewport_index_slot = lp->viewport_index_slot;
1267 setup->layer_slot = lp->layer_slot;
1268 setup->face_slot = lp->face_slot;
1269
1270 assert(lp->dirty == 0);
1271
1272 assert(lp->setup_variant.key.size ==
1273 setup->setup.variant->key.size);
1274
1275 assert(memcmp(&lp->setup_variant.key,
1276 &setup->setup.variant->key,
1277 setup->setup.variant->key.size) == 0);
1278 }
1279
1280 if (update_scene && setup->state != SETUP_ACTIVE) {
1281 if (!set_scene_state( setup, SETUP_ACTIVE, __FUNCTION__ ))
1282 return FALSE;
1283 }
1284
1285 /* Only call into update_scene_state() if we already have a
1286 * scene:
1287 */
1288 if (update_scene && setup->scene) {
1289 assert(setup->state == SETUP_ACTIVE);
1290
1291 if (try_update_scene_state(setup))
1292 return TRUE;
1293
1294 /* Update failed, try to restart the scene.
1295 *
1296 * Cannot call lp_setup_flush_and_restart() directly here
1297 * because of potential recursion.
1298 */
1299 if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
1300 return FALSE;
1301
1302 if (!set_scene_state(setup, SETUP_ACTIVE, __FUNCTION__))
1303 return FALSE;
1304
1305 if (!setup->scene)
1306 return FALSE;
1307
1308 return try_update_scene_state(setup);
1309 }
1310
1311 return TRUE;
1312 }
1313
1314
1315
1316 /* Only caller is lp_setup_vbuf_destroy()
1317 */
1318 void
1319 lp_setup_destroy( struct lp_setup_context *setup )
1320 {
1321 uint i;
1322
1323 lp_setup_reset( setup );
1324
1325 util_unreference_framebuffer_state(&setup->fb);
1326
1327 for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
1328 pipe_resource_reference(&setup->fs.current_tex[i], NULL);
1329 }
1330
1331 for (i = 0; i < ARRAY_SIZE(setup->constants); i++) {
1332 pipe_resource_reference(&setup->constants[i].current.buffer, NULL);
1333 }
1334
1335 for (i = 0; i < ARRAY_SIZE(setup->ssbos); i++) {
1336 pipe_resource_reference(&setup->ssbos[i].current.buffer, NULL);
1337 }
1338
1339 /* free the scenes in the 'empty' queue */
1340 for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
1341 struct lp_scene *scene = setup->scenes[i];
1342
1343 if (scene->fence)
1344 lp_fence_wait(scene->fence);
1345
1346 lp_scene_destroy(scene);
1347 }
1348
1349 lp_fence_reference(&setup->last_fence, NULL);
1350
1351 FREE( setup );
1352 }
1353
1354
1355 /**
1356 * Create a new primitive tiling engine. Plug it into the backend of
1357 * the draw module. Currently also creates a rasterizer to use with
1358 * it.
1359 */
1360 struct lp_setup_context *
1361 lp_setup_create( struct pipe_context *pipe,
1362 struct draw_context *draw )
1363 {
1364 struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
1365 struct lp_setup_context *setup;
1366 unsigned i;
1367
1368 setup = CALLOC_STRUCT(lp_setup_context);
1369 if (!setup) {
1370 goto no_setup;
1371 }
1372
1373 lp_setup_init_vbuf(setup);
1374
1375 /* Used only in update_state():
1376 */
1377 setup->pipe = pipe;
1378
1379
1380 setup->num_threads = screen->num_threads;
1381 setup->vbuf = draw_vbuf_stage(draw, &setup->base);
1382 if (!setup->vbuf) {
1383 goto no_vbuf;
1384 }
1385
1386 draw_set_rasterize_stage(draw, setup->vbuf);
1387 draw_set_render(draw, &setup->base);
1388
1389 /* create some empty scenes */
1390 for (i = 0; i < MAX_SCENES; i++) {
1391 setup->scenes[i] = lp_scene_create( pipe );
1392 if (!setup->scenes[i]) {
1393 goto no_scenes;
1394 }
1395 }
1396
1397 setup->triangle = first_triangle;
1398 setup->line = first_line;
1399 setup->point = first_point;
1400
1401 setup->dirty = ~0;
1402
1403 /* Initialize empty default fb correctly, so the rect is empty */
1404 setup->framebuffer.x1 = -1;
1405 setup->framebuffer.y1 = -1;
1406
1407 return setup;
1408
1409 no_scenes:
1410 for (i = 0; i < MAX_SCENES; i++) {
1411 if (setup->scenes[i]) {
1412 lp_scene_destroy(setup->scenes[i]);
1413 }
1414 }
1415
1416 setup->vbuf->destroy(setup->vbuf);
1417 no_vbuf:
1418 FREE(setup);
1419 no_setup:
1420 return NULL;
1421 }
1422
1423
1424 /**
1425 * Put a BeginQuery command into all bins.
1426 */
1427 void
1428 lp_setup_begin_query(struct lp_setup_context *setup,
1429 struct llvmpipe_query *pq)
1430 {
1431
1432 set_scene_state(setup, SETUP_ACTIVE, "begin_query");
1433
1434 if (!(pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1435 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1436 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
1437 pq->type == PIPE_QUERY_PIPELINE_STATISTICS))
1438 return;
1439
1440 /* init the query to its beginning state */
1441 assert(setup->active_binned_queries < LP_MAX_ACTIVE_BINNED_QUERIES);
1442 /* exceeding list size so just ignore the query */
1443 if (setup->active_binned_queries >= LP_MAX_ACTIVE_BINNED_QUERIES) {
1444 return;
1445 }
1446 assert(setup->active_queries[setup->active_binned_queries] == NULL);
1447 setup->active_queries[setup->active_binned_queries] = pq;
1448 setup->active_binned_queries++;
1449
1450 assert(setup->scene);
1451 if (setup->scene) {
1452 if (!lp_scene_bin_everywhere(setup->scene,
1453 LP_RAST_OP_BEGIN_QUERY,
1454 lp_rast_arg_query(pq))) {
1455
1456 if (!lp_setup_flush_and_restart(setup))
1457 return;
1458
1459 if (!lp_scene_bin_everywhere(setup->scene,
1460 LP_RAST_OP_BEGIN_QUERY,
1461 lp_rast_arg_query(pq))) {
1462 return;
1463 }
1464 }
1465 setup->scene->had_queries |= TRUE;
1466 }
1467 }
1468
1469
1470 /**
1471 * Put an EndQuery command into all bins.
1472 */
1473 void
1474 lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq)
1475 {
1476 set_scene_state(setup, SETUP_ACTIVE, "end_query");
1477
1478 assert(setup->scene);
1479 if (setup->scene) {
1480 /* pq->fence should be the fence of the *last* scene which
1481 * contributed to the query result.
1482 */
1483 lp_fence_reference(&pq->fence, setup->scene->fence);
1484
1485 if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1486 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1487 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
1488 pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
1489 pq->type == PIPE_QUERY_TIMESTAMP) {
1490 if (pq->type == PIPE_QUERY_TIMESTAMP &&
1491 !(setup->scene->tiles_x | setup->scene->tiles_y)) {
1492 /*
1493 * If the framebuffer has zero width/height, there are no bins and
1494 * hence no rast task is ever run. So fill in something here instead.
1495 */
1496 pq->end[0] = os_time_get_nano();
1497 }
1498
1499 if (!lp_scene_bin_everywhere(setup->scene,
1500 LP_RAST_OP_END_QUERY,
1501 lp_rast_arg_query(pq))) {
1502 if (!lp_setup_flush_and_restart(setup))
1503 goto fail;
1504
1505 if (!lp_scene_bin_everywhere(setup->scene,
1506 LP_RAST_OP_END_QUERY,
1507 lp_rast_arg_query(pq))) {
1508 goto fail;
1509 }
1510 }
1511 setup->scene->had_queries |= TRUE;
1512 }
1513 }
1514 else {
1515 lp_fence_reference(&pq->fence, setup->last_fence);
1516 }
1517
1518 fail:
1519 /* Need to do this now and not earlier, since the query still needs to be
1520 * marked as active in case binning the command above caused a flush.
1521 */
1522 if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1523 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1524 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
1525 pq->type == PIPE_QUERY_PIPELINE_STATISTICS) {
1526 unsigned i;
1527
1528 /* remove from active binned query list */
1529 for (i = 0; i < setup->active_binned_queries; i++) {
1530 if (setup->active_queries[i] == pq)
1531 break;
1532 }
1533 assert(i < setup->active_binned_queries);
1534 if (i == setup->active_binned_queries)
1535 return;
1536 setup->active_binned_queries--;
1537 setup->active_queries[i] = setup->active_queries[setup->active_binned_queries];
1538 setup->active_queries[setup->active_binned_queries] = NULL;
1539 }
1540 }
1541
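/*
 * Editor's sketch (not part of the original file): the begin/end pairing
 * expected by the two functions above, as seen from the query code (the
 * surrounding llvmpipe query implementation is assumed):
 *
 *    lp_setup_begin_query(setup, pq);
 *    ...draw calls are binned while the query is active...
 *    lp_setup_end_query(setup, pq);
 *
 *    ...later, before reading results back...
 *    if (pq->fence)
 *       lp_fence_wait(pq->fence);
 */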
1542
1543 boolean
1544 lp_setup_flush_and_restart(struct lp_setup_context *setup)
1545 {
1546 if (0) debug_printf("%s\n", __FUNCTION__);
1547
1548 assert(setup->state == SETUP_ACTIVE);
1549
1550 if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
1551 return FALSE;
1552
1553 if (!lp_setup_update_state(setup, TRUE))
1554 return FALSE;
1555
1556 return TRUE;
1557 }
1558
1559