gallium: change comments to remove 'state tracker'
[mesa.git] / src / gallium / drivers / llvmpipe / lp_setup.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * Tiling engine.
30 *
31 * Builds per-tile display lists and executes them on calls to
32 * lp_setup_flush().
33 */
34
35 #include <limits.h>
36
37 #include "pipe/p_defines.h"
38 #include "util/u_framebuffer.h"
39 #include "util/u_inlines.h"
40 #include "util/u_memory.h"
41 #include "util/u_pack_color.h"
42 #include "util/u_viewport.h"
43 #include "draw/draw_pipe.h"
44 #include "util/os_time.h"
45 #include "lp_context.h"
46 #include "lp_memory.h"
47 #include "lp_scene.h"
48 #include "lp_texture.h"
49 #include "lp_debug.h"
50 #include "lp_fence.h"
51 #include "lp_query.h"
52 #include "lp_rast.h"
53 #include "lp_setup_context.h"
54 #include "lp_screen.h"
55 #include "lp_state.h"
56 #include "frontend/sw_winsys.h"
57
58 #include "draw/draw_context.h"
59 #include "draw/draw_vbuf.h"
60
61
62 static boolean set_scene_state( struct lp_setup_context *, enum setup_state,
63 const char *reason);
64 static boolean try_update_scene_state( struct lp_setup_context *setup );
65
66
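/**
 * Advance to the next scene in the ring, waiting for any rasterization
 * still pending on it to complete before binning into it again.
 */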
67 static void
68 lp_setup_get_empty_scene(struct lp_setup_context *setup)
69 {
70 assert(setup->scene == NULL);
71
72 setup->scene_idx++;
73 setup->scene_idx %= ARRAY_SIZE(setup->scenes);
74
75 setup->scene = setup->scenes[setup->scene_idx];
76
77 if (setup->scene->fence) {
78 if (LP_DEBUG & DEBUG_SETUP)
79 debug_printf("%s: wait for scene %d\n",
80 __FUNCTION__, setup->scene->fence->id);
81
82 lp_fence_wait(setup->scene->fence);
83 }
84
85 lp_scene_begin_binning(setup->scene, &setup->fb);
86
87 }
88
89
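/**
 * The first triangle/line/point of a scene goes through these wrappers,
 * which pick the appropriate specialized setup function and then forward
 * the primitive to it.
 */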
90 static void
91 first_triangle( struct lp_setup_context *setup,
92 const float (*v0)[4],
93 const float (*v1)[4],
94 const float (*v2)[4])
95 {
96 assert(setup->state == SETUP_ACTIVE);
97 lp_setup_choose_triangle( setup );
98 setup->triangle( setup, v0, v1, v2 );
99 }
100
101 static void
102 first_line( struct lp_setup_context *setup,
103 const float (*v0)[4],
104 const float (*v1)[4])
105 {
106 assert(setup->state == SETUP_ACTIVE);
107 lp_setup_choose_line( setup );
108 setup->line( setup, v0, v1 );
109 }
110
111 static void
112 first_point( struct lp_setup_context *setup,
113 const float (*v0)[4])
114 {
115 assert(setup->state == SETUP_ACTIVE);
116 lp_setup_choose_point( setup );
117 setup->point( setup, v0 );
118 }
119
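/**
 * Reset the per-scene derived state (stored state pointers, clear state,
 * prim handlers) so that the next primitive starts from a clean slate.
 */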
120 void lp_setup_reset( struct lp_setup_context *setup )
121 {
122 unsigned i;
123
124 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
125
126 /* Reset derived state */
127 for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
128 setup->constants[i].stored_size = 0;
129 setup->constants[i].stored_data = NULL;
130 }
131 setup->fs.stored = NULL;
132 setup->dirty = ~0;
133
134 /* no current bin */
135 setup->scene = NULL;
136
137 /* Reset some state:
138 */
139 memset(&setup->clear, 0, sizeof setup->clear);
140
141 /* Have an explicit "start-binning" call and get rid of this
142 * pointer twiddling?
143 */
144 setup->line = first_line;
145 setup->point = first_point;
146 setup->triangle = first_triangle;
147 }
148
149
150 /** Rasterize all scene's bins */
151 static void
152 lp_setup_rasterize_scene( struct lp_setup_context *setup )
153 {
154 struct lp_scene *scene = setup->scene;
155 struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);
156
157 scene->num_active_queries = setup->active_binned_queries;
158 memcpy(scene->active_queries, setup->active_queries,
159 scene->num_active_queries * sizeof(scene->active_queries[0]));
160
161 lp_scene_end_binning(scene);
162
163 lp_fence_reference(&setup->last_fence, scene->fence);
164
165 if (setup->last_fence)
166 setup->last_fence->issued = TRUE;
167
168 mtx_lock(&screen->rast_mutex);
169
170 /* FIXME: We enqueue the scene then wait on the rasterizer to finish.
171 * This means we never actually run any vertex stuff in parallel to
172 * rasterization (not in the same context at least) which is what the
173 * multiple scenes per setup is about - when we get a new empty scene
174 * any old one is already empty again because we waited here for
175 * raster tasks to be finished. Ideally, we shouldn't need to wait here
176 * at all, and instead rely on fences elsewhere when waiting is necessary.
177 * Certainly, lp_scene_end_rasterization() would need to be deferred too,
178 * and there are probably other reasons why this doesn't actually work yet.
179 */
180 lp_rast_queue_scene(screen->rast, scene);
181 lp_rast_finish(screen->rast);
182 mtx_unlock(&screen->rast_mutex);
183
184 lp_scene_end_rasterization(setup->scene);
185 lp_setup_reset( setup );
186
187 LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
188 }
189
190
191
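/**
 * Prepare the current scene for binning: create its fence, store the
 * current state into it and bin any pending full-surface clears.
 */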
192 static boolean
193 begin_binning( struct lp_setup_context *setup )
194 {
195 struct lp_scene *scene = setup->scene;
196 boolean need_zsload = FALSE;
197 boolean ok;
198
199 assert(scene);
200 assert(scene->fence == NULL);
201
202 /* Always create a fence:
203 */
204 scene->fence = lp_fence_create(MAX2(1, setup->num_threads));
205 if (!scene->fence)
206 return FALSE;
207
208 ok = try_update_scene_state(setup);
209 if (!ok)
210 return FALSE;
211
212 if (setup->fb.zsbuf &&
213 ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
214 util_format_is_depth_and_stencil(setup->fb.zsbuf->format))
215 need_zsload = TRUE;
216
217 LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__,
218 setup->clear.flags >> 2,
219 need_zsload ? "clear": "load");
220
221 if (setup->clear.flags & PIPE_CLEAR_COLOR) {
222 unsigned cbuf;
223 for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) {
224 assert(PIPE_CLEAR_COLOR0 == 1 << 2);
225 if (setup->clear.flags & (1 << (2 + cbuf))) {
226 union lp_rast_cmd_arg clearrb_arg;
227 struct lp_rast_clear_rb *cc_scene =
228 (struct lp_rast_clear_rb *)
229 lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb));
230
231 if (!cc_scene) {
232 return FALSE;
233 }
234
235 cc_scene->cbuf = cbuf;
236 cc_scene->color_val = setup->clear.color_val[cbuf];
237 clearrb_arg.clear_rb = cc_scene;
238
239 if (!lp_scene_bin_everywhere(scene,
240 LP_RAST_OP_CLEAR_COLOR,
241 clearrb_arg))
242 return FALSE;
243 }
244 }
245 }
246
247 if (setup->fb.zsbuf) {
248 if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) {
249 ok = lp_scene_bin_everywhere( scene,
250 LP_RAST_OP_CLEAR_ZSTENCIL,
251 lp_rast_arg_clearzs(
252 setup->clear.zsvalue,
253 setup->clear.zsmask));
254 if (!ok)
255 return FALSE;
256 }
257 }
258
259 setup->clear.flags = 0;
260 setup->clear.zsmask = 0;
261 setup->clear.zsvalue = 0;
262
263 scene->had_queries = !!setup->active_binned_queries;
264
265 LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
266 return TRUE;
267 }
268
269
270 /* This basically bins and then flushes any outstanding full-screen
271 * clears.
272 *
273 * TODO: fast path for fullscreen clears and no triangles.
274 */
275 static boolean
276 execute_clears( struct lp_setup_context *setup )
277 {
278 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
279
280 return begin_binning( setup );
281 }
282
283 static const char *states[] = {
284 "FLUSHED",
285 "CLEARED",
286 "ACTIVE "
287 };
288
289
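/**
 * Transition the setup module between the FLUSHED, CLEARED and ACTIVE
 * states, acquiring a fresh scene or rasterizing the current one as the
 * transition requires.
 */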
290 static boolean
291 set_scene_state( struct lp_setup_context *setup,
292 enum setup_state new_state,
293 const char *reason)
294 {
295 unsigned old_state = setup->state;
296
297 if (old_state == new_state)
298 return TRUE;
299
300 if (LP_DEBUG & DEBUG_SCENE) {
301 debug_printf("%s old %s new %s%s%s\n",
302 __FUNCTION__,
303 states[old_state],
304 states[new_state],
305 (new_state == SETUP_FLUSHED) ? ": " : "",
306 (new_state == SETUP_FLUSHED) ? reason : "");
307
308 if (new_state == SETUP_FLUSHED && setup->scene)
309 lp_debug_draw_bins_by_cmd_length(setup->scene);
310 }
311
312 /* wait for a free/empty scene
313 */
314 if (old_state == SETUP_FLUSHED)
315 lp_setup_get_empty_scene(setup);
316
317 switch (new_state) {
318 case SETUP_CLEARED:
319 break;
320
321 case SETUP_ACTIVE:
322 if (!begin_binning( setup ))
323 goto fail;
324 break;
325
326 case SETUP_FLUSHED:
327 if (old_state == SETUP_CLEARED)
328 if (!execute_clears( setup ))
329 goto fail;
330
331 lp_setup_rasterize_scene( setup );
332 assert(setup->scene == NULL);
333 break;
334
335 default:
336 assert(0 && "invalid setup state mode");
337 goto fail;
338 }
339
340 setup->state = new_state;
341 return TRUE;
342
343 fail:
344 if (setup->scene) {
345 lp_scene_end_rasterization(setup->scene);
346 setup->scene = NULL;
347 }
348
349 setup->state = SETUP_FLUSHED;
350 lp_setup_reset( setup );
351 return FALSE;
352 }
353
354
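/**
 * Rasterize any pending scene and optionally return a fence the caller
 * can wait on for completion.
 */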
355 void
356 lp_setup_flush( struct lp_setup_context *setup,
357 struct pipe_fence_handle **fence,
358 const char *reason)
359 {
360 set_scene_state( setup, SETUP_FLUSHED, reason );
361
362 if (fence) {
363 lp_fence_reference((struct lp_fence **)fence, setup->last_fence);
364 if (!*fence)
365 *fence = (struct pipe_fence_handle *)lp_fence_create(0);
366 }
367 }
368
369
370 void
371 lp_setup_bind_framebuffer( struct lp_setup_context *setup,
372 const struct pipe_framebuffer_state *fb )
373 {
374 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
375
376 /* Flush any old scene.
377 */
378 set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );
379
380 /*
381 * Ensure the old scene is not reused.
382 */
383 assert(!setup->scene);
384
385 /* Set new state. This will be picked up later when we next need a
386 * scene.
387 */
388 util_copy_framebuffer_state(&setup->fb, fb);
389 setup->framebuffer.x0 = 0;
390 setup->framebuffer.y0 = 0;
391 setup->framebuffer.x1 = fb->width-1;
392 setup->framebuffer.y1 = fb->height-1;
393 setup->dirty |= LP_SETUP_NEW_SCISSOR;
394 }
395
396
397 /*
398 * Try to clear one color buffer of the attached fb, either by binning a clear
399 * command or queuing up the clear for later (when binning is started).
400 */
401 static boolean
402 lp_setup_try_clear_color_buffer(struct lp_setup_context *setup,
403 const union pipe_color_union *color,
404 unsigned cbuf)
405 {
406 union lp_rast_cmd_arg clearrb_arg;
407 union util_color uc;
408 enum pipe_format format = setup->fb.cbufs[cbuf]->format;
409
410 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);
411
412 util_pack_color_union(format, &uc, color);
413
414 if (setup->state == SETUP_ACTIVE) {
415 struct lp_scene *scene = setup->scene;
416
417 /* Add the clear to existing scene. In the unusual case where
418 * both color and depth-stencil are being cleared when there's
419 * already been some rendering, we could discard the currently
420 * binned scene and start again, but I don't see that as being
421 * a common usage.
422 */
423 struct lp_rast_clear_rb *cc_scene =
424 (struct lp_rast_clear_rb *)
425 lp_scene_alloc_aligned(scene, sizeof(struct lp_rast_clear_rb), 8);
426
427 if (!cc_scene) {
428 return FALSE;
429 }
430
431 cc_scene->cbuf = cbuf;
432 cc_scene->color_val = uc;
433 clearrb_arg.clear_rb = cc_scene;
434
435 if (!lp_scene_bin_everywhere(scene,
436 LP_RAST_OP_CLEAR_COLOR,
437 clearrb_arg))
438 return FALSE;
439 }
440 else {
441 /* Put ourselves into the 'pre-clear' state, specifically to try
442 * and accumulate multiple clears to color and depth_stencil
443 * buffers which the app or gallium frontend might issue
444 * separately.
445 */
446 set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );
447
448 assert(PIPE_CLEAR_COLOR0 == (1 << 2));
449 setup->clear.flags |= 1 << (cbuf + 2);
450 setup->clear.color_val[cbuf] = uc;
451 }
452
453 return TRUE;
454 }
455
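/*
 * Try to clear the depth/stencil buffer of the attached fb, either by
 * binning a clear command or queuing up the clear for later (when binning
 * is started).
 */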
456 static boolean
457 lp_setup_try_clear_zs(struct lp_setup_context *setup,
458 double depth,
459 unsigned stencil,
460 unsigned flags)
461 {
462 uint64_t zsmask = 0;
463 uint64_t zsvalue = 0;
464 uint32_t zmask32;
465 uint8_t smask8;
466 enum pipe_format format = setup->fb.zsbuf->format;
467
468 LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);
469
470 zmask32 = (flags & PIPE_CLEAR_DEPTH) ? ~0 : 0;
471 smask8 = (flags & PIPE_CLEAR_STENCIL) ? ~0 : 0;
472
473 zsvalue = util_pack64_z_stencil(format, depth, stencil);
474
475 zsmask = util_pack64_mask_z_stencil(format, zmask32, smask8);
476
477 zsvalue &= zsmask;
478
479 if (format == PIPE_FORMAT_Z24X8_UNORM ||
480 format == PIPE_FORMAT_X8Z24_UNORM) {
481 /*
482 * Make full mask if there's "X" bits so we can do full
483 * clear (without rmw).
484 */
485 uint32_t zsmask_full = 0;
486 zsmask_full = util_pack_mask_z_stencil(format, ~0, ~0);
487 zsmask |= ~zsmask_full;
488 }
489
490 if (setup->state == SETUP_ACTIVE) {
491 struct lp_scene *scene = setup->scene;
492
493 /* Add the clear to existing scene. In the unusual case where
494 * both color and depth-stencil are being cleared when there's
495 * already been some rendering, we could discard the currently
496 * binned scene and start again, but I don't see that as being
497 * a common usage.
498 */
499 if (!lp_scene_bin_everywhere(scene,
500 LP_RAST_OP_CLEAR_ZSTENCIL,
501 lp_rast_arg_clearzs(zsvalue, zsmask)))
502 return FALSE;
503 }
504 else {
505 /* Put ourselves into the 'pre-clear' state, specifically to try
506 * and accumulate multiple clears to color and depth_stencil
507 * buffers which the app or gallium frontend might issue
508 * separately.
509 */
510 set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );
511
512 setup->clear.flags |= flags;
513
514 setup->clear.zsmask |= zsmask;
515 setup->clear.zsvalue =
516 (setup->clear.zsvalue & ~zsmask) | (zsvalue & zsmask);
517 }
518
519 return TRUE;
520 }
521
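/**
 * Clear the requested color/depth/stencil buffers, flushing and retrying a
 * buffer's clear once if the scene ran out of space.
 */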
522 void
523 lp_setup_clear( struct lp_setup_context *setup,
524 const union pipe_color_union *color,
525 double depth,
526 unsigned stencil,
527 unsigned flags )
528 {
529 unsigned i;
530
531 /*
532 * Note any of these (max 9) clears could fail (but at most there should
533 * be just one failure!). Handling each buffer separately avoids redoing
534 * clears that already succeeded (we still clear tiles twice if a clear
535 * command succeeded only partially for one buffer).
536 */
537 if (flags & PIPE_CLEAR_DEPTHSTENCIL) {
538 unsigned flagszs = flags & PIPE_CLEAR_DEPTHSTENCIL;
539 if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) {
540 lp_setup_flush(setup, NULL, __FUNCTION__);
541
542 if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs))
543 assert(0);
544 }
545 }
546
547 if (flags & PIPE_CLEAR_COLOR) {
548 assert(PIPE_CLEAR_COLOR0 == (1 << 2));
549 for (i = 0; i < setup->fb.nr_cbufs; i++) {
550 if ((flags & (1 << (2 + i))) && setup->fb.cbufs[i]) {
551 if (!lp_setup_try_clear_color_buffer(setup, color, i)) {
552 lp_setup_flush(setup, NULL, __FUNCTION__);
553
554 if (!lp_setup_try_clear_color_buffer(setup, color, i))
555 assert(0);
556 }
557 }
558 }
559 }
560 }
561
562
563
564 void
565 lp_setup_set_triangle_state( struct lp_setup_context *setup,
566 unsigned cull_mode,
567 boolean ccw_is_frontface,
568 boolean scissor,
569 boolean half_pixel_center,
570 boolean bottom_edge_rule,
571 boolean multisample)
572 {
573 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
574
575 setup->ccw_is_frontface = ccw_is_frontface;
576 setup->cullmode = cull_mode;
577 setup->triangle = first_triangle;
578 setup->multisample = multisample;
579 setup->pixel_offset = half_pixel_center ? 0.5f : 0.0f;
580 setup->bottom_edge_rule = bottom_edge_rule;
581
582 if (setup->scissor_test != scissor) {
583 setup->dirty |= LP_SETUP_NEW_SCISSOR;
584 setup->scissor_test = scissor;
585 }
586 }
587
588 void
589 lp_setup_set_line_state( struct lp_setup_context *setup,
590 float line_width)
591 {
592 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
593
594 setup->line_width = line_width;
595 }
596
597 void
598 lp_setup_set_point_state( struct lp_setup_context *setup,
599 float point_size,
600 boolean point_size_per_vertex,
601 uint sprite_coord_enable,
602 uint sprite_coord_origin)
603 {
604 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
605
606 setup->point_size = point_size;
607 setup->sprite_coord_enable = sprite_coord_enable;
608 setup->sprite_coord_origin = sprite_coord_origin;
609 setup->point_size_per_vertex = point_size_per_vertex;
610 }
611
612 void
613 lp_setup_set_setup_variant( struct lp_setup_context *setup,
614 const struct lp_setup_variant *variant)
615 {
616 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
617
618 setup->setup.variant = variant;
619 }
620
621 void
622 lp_setup_set_fs_variant( struct lp_setup_context *setup,
623 struct lp_fragment_shader_variant *variant)
624 {
625 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__,
626 variant);
627 /* FIXME: reference count */
628
629 setup->fs.current.variant = variant;
630 setup->dirty |= LP_SETUP_NEW_FS;
631 }
632
633 void
634 lp_setup_set_fs_constants(struct lp_setup_context *setup,
635 unsigned num,
636 struct pipe_constant_buffer *buffers)
637 {
638 unsigned i;
639
640 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);
641
642 assert(num <= ARRAY_SIZE(setup->constants));
643
644 for (i = 0; i < num; ++i) {
645 util_copy_constant_buffer(&setup->constants[i].current, &buffers[i]);
646 }
647 for (; i < ARRAY_SIZE(setup->constants); i++) {
648 util_copy_constant_buffer(&setup->constants[i].current, NULL);
649 }
650 setup->dirty |= LP_SETUP_NEW_CONSTANTS;
651 }
652
653 void
654 lp_setup_set_fs_ssbos(struct lp_setup_context *setup,
655 unsigned num,
656 struct pipe_shader_buffer *buffers)
657 {
658 unsigned i;
659
660 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);
661
662 assert(num <= ARRAY_SIZE(setup->ssbos));
663
664 for (i = 0; i < num; ++i) {
665 util_copy_shader_buffer(&setup->ssbos[i].current, &buffers[i]);
666 }
667 for (; i < ARRAY_SIZE(setup->ssbos); i++) {
668 util_copy_shader_buffer(&setup->ssbos[i].current, NULL);
669 }
670 setup->dirty |= LP_SETUP_NEW_SSBOS;
671 }
672
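/**
 * Bind fragment shader image views: store the views and fill in the
 * per-image jit_image state (base pointer, sizes and strides) consumed by
 * the generated fragment shader code.
 */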
673 void
674 lp_setup_set_fs_images(struct lp_setup_context *setup,
675 unsigned num,
676 struct pipe_image_view *images)
677 {
678 unsigned i;
679
680 LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) images);
681
682 assert(num <= ARRAY_SIZE(setup->images));
683
684 for (i = 0; i < num; ++i) {
685 struct pipe_image_view *image = &images[i];
686 util_copy_image_view(&setup->images[i].current, &images[i]);
687
688 struct pipe_resource *res = image->resource;
689 struct llvmpipe_resource *lp_res = llvmpipe_resource(res);
690 struct lp_jit_image *jit_image;
691
692 jit_image = &setup->fs.current.jit_context.images[i];
693 if (!lp_res)
694 continue;
695 if (!lp_res->dt) {
696 /* regular texture - setup array of mipmap level offsets */
697 if (llvmpipe_resource_is_texture(res)) {
698 jit_image->base = lp_res->tex_data;
699 } else
700 jit_image->base = lp_res->data;
701
702 jit_image->width = res->width0;
703 jit_image->height = res->height0;
704 jit_image->depth = res->depth0;
705 jit_image->num_samples = res->nr_samples;
706
707 if (llvmpipe_resource_is_texture(res)) {
708 uint32_t mip_offset = lp_res->mip_offsets[image->u.tex.level];
709
710 jit_image->width = u_minify(jit_image->width, image->u.tex.level);
711 jit_image->height = u_minify(jit_image->height, image->u.tex.level);
712
713 if (res->target == PIPE_TEXTURE_1D_ARRAY ||
714 res->target == PIPE_TEXTURE_2D_ARRAY ||
715 res->target == PIPE_TEXTURE_3D ||
716 res->target == PIPE_TEXTURE_CUBE ||
717 res->target == PIPE_TEXTURE_CUBE_ARRAY) {
718 /*
719 * For array textures, we don't have first_layer; instead we
720 * adjust last_layer (stored as depth) plus the mip level offsets
721 * (as we have a mip-first layout, we can't just adjust the base ptr).
722 * XXX For mip levels, could do something similar.
723 */
724 jit_image->depth = image->u.tex.last_layer - image->u.tex.first_layer + 1;
725 mip_offset += image->u.tex.first_layer * lp_res->img_stride[image->u.tex.level];
726 } else
727 jit_image->depth = u_minify(jit_image->depth, image->u.tex.level);
728
729 jit_image->row_stride = lp_res->row_stride[image->u.tex.level];
730 jit_image->img_stride = lp_res->img_stride[image->u.tex.level];
731 jit_image->sample_stride = lp_res->sample_stride;
732 jit_image->base = (uint8_t *)jit_image->base + mip_offset;
733 }
734 else {
735 unsigned view_blocksize = util_format_get_blocksize(image->format);
736 jit_image->width = image->u.buf.size / view_blocksize;
737 jit_image->base = (uint8_t *)jit_image->base + image->u.buf.offset;
738 }
739 }
740 }
741 for (; i < ARRAY_SIZE(setup->images); i++) {
742 util_copy_image_view(&setup->images[i].current, NULL);
743 }
744 setup->dirty |= LP_SETUP_NEW_IMAGES;
745 }
746
747 void
748 lp_setup_set_alpha_ref_value( struct lp_setup_context *setup,
749 float alpha_ref_value )
750 {
751 LP_DBG(DEBUG_SETUP, "%s %f\n", __FUNCTION__, alpha_ref_value);
752
753 if(setup->fs.current.jit_context.alpha_ref_value != alpha_ref_value) {
754 setup->fs.current.jit_context.alpha_ref_value = alpha_ref_value;
755 setup->dirty |= LP_SETUP_NEW_FS;
756 }
757 }
758
759 void
760 lp_setup_set_stencil_ref_values( struct lp_setup_context *setup,
761 const ubyte refs[2] )
762 {
763 LP_DBG(DEBUG_SETUP, "%s %d %d\n", __FUNCTION__, refs[0], refs[1]);
764
765 if (setup->fs.current.jit_context.stencil_ref_front != refs[0] ||
766 setup->fs.current.jit_context.stencil_ref_back != refs[1]) {
767 setup->fs.current.jit_context.stencil_ref_front = refs[0];
768 setup->fs.current.jit_context.stencil_ref_back = refs[1];
769 setup->dirty |= LP_SETUP_NEW_FS;
770 }
771 }
772
773 void
774 lp_setup_set_blend_color( struct lp_setup_context *setup,
775 const struct pipe_blend_color *blend_color )
776 {
777 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
778
779 assert(blend_color);
780
781 if(memcmp(&setup->blend_color.current, blend_color, sizeof *blend_color) != 0) {
782 memcpy(&setup->blend_color.current, blend_color, sizeof *blend_color);
783 setup->dirty |= LP_SETUP_NEW_BLEND_COLOR;
784 }
785 }
786
787
788 void
789 lp_setup_set_scissors( struct lp_setup_context *setup,
790 const struct pipe_scissor_state *scissors )
791 {
792 unsigned i;
793 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
794
795 assert(scissors);
796
797 for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
798 setup->scissors[i].x0 = scissors[i].minx;
799 setup->scissors[i].x1 = scissors[i].maxx-1;
800 setup->scissors[i].y0 = scissors[i].miny;
801 setup->scissors[i].y1 = scissors[i].maxy-1;
802 }
803 setup->dirty |= LP_SETUP_NEW_SCISSOR;
804 }
805
806 void
807 lp_setup_set_sample_mask(struct lp_setup_context *setup,
808 uint32_t sample_mask)
809 {
810 if (setup->fs.current.jit_context.sample_mask != sample_mask) {
811 setup->fs.current.jit_context.sample_mask = sample_mask;
812 setup->dirty |= LP_SETUP_NEW_FS;
813 }
814 }
815
816 void
817 lp_setup_set_flatshade_first(struct lp_setup_context *setup,
818 boolean flatshade_first)
819 {
820 setup->flatshade_first = flatshade_first;
821 }
822
823 void
824 lp_setup_set_rasterizer_discard(struct lp_setup_context *setup,
825 boolean rasterizer_discard)
826 {
827 if (setup->rasterizer_discard != rasterizer_discard) {
828 setup->rasterizer_discard = rasterizer_discard;
829 setup->line = first_line;
830 setup->point = first_point;
831 setup->triangle = first_triangle;
832 }
833 }
834
835 void
836 lp_setup_set_vertex_info(struct lp_setup_context *setup,
837 struct vertex_info *vertex_info)
838 {
839 /* XXX: just silently holding onto the pointer:
840 */
841 setup->vertex_info = vertex_info;
842 }
843
844
845 /**
846 * Called during state validation when LP_NEW_VIEWPORT is set.
847 */
848 void
849 lp_setup_set_viewports(struct lp_setup_context *setup,
850 unsigned num_viewports,
851 const struct pipe_viewport_state *viewports)
852 {
853 struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
854 unsigned i;
855
856 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
857
858 assert(num_viewports <= PIPE_MAX_VIEWPORTS);
859 assert(viewports);
860
861 /*
862 * For use in lp_state_fs.c, propagate the viewport values for all viewports.
863 */
864 for (i = 0; i < num_viewports; i++) {
865 float min_depth;
866 float max_depth;
867 util_viewport_zmin_zmax(&viewports[i], lp->rasterizer->clip_halfz,
868 &min_depth, &max_depth);
869
870 if (setup->viewports[i].min_depth != min_depth ||
871 setup->viewports[i].max_depth != max_depth) {
872 setup->viewports[i].min_depth = min_depth;
873 setup->viewports[i].max_depth = max_depth;
874 setup->dirty |= LP_SETUP_NEW_VIEWPORTS;
875 }
876 }
877 }
878
879
880 /**
881 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
882 */
883 void
884 lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
885 unsigned num,
886 struct pipe_sampler_view **views)
887 {
888 unsigned i, max_tex_num;
889
890 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
891
892 assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
893
894 max_tex_num = MAX2(num, setup->fs.current_tex_num);
895
896 for (i = 0; i < max_tex_num; i++) {
897 struct pipe_sampler_view *view = i < num ? views[i] : NULL;
898
899 if (view) {
900 struct pipe_resource *res = view->texture;
901 struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
902 struct lp_jit_texture *jit_tex;
903 jit_tex = &setup->fs.current.jit_context.textures[i];
904
905 /* We're referencing the texture's internal data, so save a
906 * reference to it.
907 */
908 pipe_resource_reference(&setup->fs.current_tex[i], res);
909
910 if (!lp_tex->dt) {
911 /* regular texture - setup array of mipmap level offsets */
912 int j;
913 unsigned first_level = 0;
914 unsigned last_level = 0;
915
916 if (llvmpipe_resource_is_texture(res)) {
917 first_level = view->u.tex.first_level;
918 last_level = view->u.tex.last_level;
919 assert(first_level <= last_level);
920 assert(last_level <= res->last_level);
921 jit_tex->base = lp_tex->tex_data;
922 }
923 else {
924 jit_tex->base = lp_tex->data;
925 }
926
927 if (LP_PERF & PERF_TEX_MEM) {
928 /* use dummy tile memory */
929 jit_tex->base = lp_dummy_tile;
930 jit_tex->width = TILE_SIZE/8;
931 jit_tex->height = TILE_SIZE/8;
932 jit_tex->depth = 1;
933 jit_tex->first_level = 0;
934 jit_tex->last_level = 0;
935 jit_tex->mip_offsets[0] = 0;
936 jit_tex->row_stride[0] = 0;
937 jit_tex->img_stride[0] = 0;
938 jit_tex->num_samples = 0;
939 jit_tex->sample_stride = 0;
940 }
941 else {
942 jit_tex->width = res->width0;
943 jit_tex->height = res->height0;
944 jit_tex->depth = res->depth0;
945 jit_tex->first_level = first_level;
946 jit_tex->last_level = last_level;
947 jit_tex->num_samples = res->nr_samples;
948 jit_tex->sample_stride = 0;
949
950 if (llvmpipe_resource_is_texture(res)) {
951 for (j = first_level; j <= last_level; j++) {
952 jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
953 jit_tex->row_stride[j] = lp_tex->row_stride[j];
954 jit_tex->img_stride[j] = lp_tex->img_stride[j];
955 }
956
957 jit_tex->sample_stride = lp_tex->sample_stride;
958
959 if (res->target == PIPE_TEXTURE_1D_ARRAY ||
960 res->target == PIPE_TEXTURE_2D_ARRAY ||
961 res->target == PIPE_TEXTURE_CUBE ||
962 res->target == PIPE_TEXTURE_CUBE_ARRAY) {
963 /*
964 * For array textures, we don't have first_layer; instead we
965 * adjust last_layer (stored as depth) plus the mip level offsets
966 * (as we have a mip-first layout, we can't just adjust the base ptr).
967 * XXX For mip levels, could do something similar.
968 */
969 jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
970 for (j = first_level; j <= last_level; j++) {
971 jit_tex->mip_offsets[j] += view->u.tex.first_layer *
972 lp_tex->img_stride[j];
973 }
974 if (view->target == PIPE_TEXTURE_CUBE ||
975 view->target == PIPE_TEXTURE_CUBE_ARRAY) {
976 assert(jit_tex->depth % 6 == 0);
977 }
978 assert(view->u.tex.first_layer <= view->u.tex.last_layer);
979 assert(view->u.tex.last_layer < res->array_size);
980 }
981 }
982 else {
983 /*
984 * For buffers, we don't have "offset", instead adjust
985 * the size (stored as width) plus the base pointer.
986 */
987 unsigned view_blocksize = util_format_get_blocksize(view->format);
988 /* probably don't really need to fill that out */
989 jit_tex->mip_offsets[0] = 0;
990 jit_tex->row_stride[0] = 0;
991 jit_tex->img_stride[0] = 0;
992
993 /* everything specified in number of elements here. */
994 jit_tex->width = view->u.buf.size / view_blocksize;
995 jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
996 /* XXX Unsure if we need to sanitize parameters? */
997 assert(view->u.buf.offset + view->u.buf.size <= res->width0);
998 }
999 }
1000 }
1001 else {
1002 /* display target texture/surface */
1003 /*
1004 * XXX: Where should this be unmapped?
1005 */
1006 struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
1007 struct sw_winsys *winsys = screen->winsys;
1008 jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
1009 PIPE_TRANSFER_READ);
1010 jit_tex->row_stride[0] = lp_tex->row_stride[0];
1011 jit_tex->img_stride[0] = lp_tex->img_stride[0];
1012 jit_tex->mip_offsets[0] = 0;
1013 jit_tex->width = res->width0;
1014 jit_tex->height = res->height0;
1015 jit_tex->depth = res->depth0;
1016 jit_tex->first_level = jit_tex->last_level = 0;
1017 jit_tex->num_samples = res->nr_samples;
1018 jit_tex->sample_stride = 0;
1019 assert(jit_tex->base);
1020 }
1021 }
1022 else {
1023 pipe_resource_reference(&setup->fs.current_tex[i], NULL);
1024 }
1025 }
1026 setup->fs.current_tex_num = num;
1027
1028 setup->dirty |= LP_SETUP_NEW_FS;
1029 }
1030
1031
1032 /**
1033 * Called during state validation when LP_NEW_SAMPLER is set.
1034 */
1035 void
1036 lp_setup_set_fragment_sampler_state(struct lp_setup_context *setup,
1037 unsigned num,
1038 struct pipe_sampler_state **samplers)
1039 {
1040 unsigned i;
1041
1042 LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
1043
1044 assert(num <= PIPE_MAX_SAMPLERS);
1045
1046 for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
1047 const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;
1048
1049 if (sampler) {
1050 struct lp_jit_sampler *jit_sam;
1051 jit_sam = &setup->fs.current.jit_context.samplers[i];
1052
1053 jit_sam->min_lod = sampler->min_lod;
1054 jit_sam->max_lod = sampler->max_lod;
1055 jit_sam->lod_bias = sampler->lod_bias;
1056 COPY_4V(jit_sam->border_color, sampler->border_color.f);
1057 }
1058 }
1059
1060 setup->dirty |= LP_SETUP_NEW_FS;
1061 }
1062
1063
1064 /**
1065 * Is the given texture referenced by any scene?
1066 * Note: we have to check all scenes including any scenes currently
1067 * being rendered and the current scene being built.
1068 */
1069 unsigned
1070 lp_setup_is_resource_referenced( const struct lp_setup_context *setup,
1071 const struct pipe_resource *texture )
1072 {
1073 unsigned i;
1074
1075 /* check the render targets */
1076 for (i = 0; i < setup->fb.nr_cbufs; i++) {
1077 if (setup->fb.cbufs[i] && setup->fb.cbufs[i]->texture == texture)
1078 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
1079 }
1080 if (setup->fb.zsbuf && setup->fb.zsbuf->texture == texture) {
1081 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
1082 }
1083
1084 /* check textures referenced by the scene */
1085 for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
1086 if (lp_scene_is_resource_referenced(setup->scenes[i], texture)) {
1087 return LP_REFERENCED_FOR_READ;
1088 }
1089 }
1090
1091 for (i = 0; i < ARRAY_SIZE(setup->ssbos); i++) {
1092 if (setup->ssbos[i].current.buffer == texture)
1093 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
1094 }
1095
1096 for (i = 0; i < ARRAY_SIZE(setup->images); i++) {
1097 if (setup->images[i].current.resource == texture)
1098 return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
1099 }
1100
1101 return LP_UNREFERENCED;
1102 }
1103
1104
1105 /**
1106 * Called by vbuf code when we're about to draw something.
1107 *
1108 * This function stores all dirty state in the current scene's display list
1109 * memory, via lp_scene_alloc(). We cannot pass pointers to mutable state to
1110 * the JIT functions, as the JIT functions will be called later on, most likely
1111 * on a different thread.
1112 *
1113 * When processing dirty state it is imperative that we don't refer to any
1114 * pointers previously allocated with lp_scene_alloc() in this function (or any
1115 * function) as they may belong to a scene freed since then.
1116 */
1117 static boolean
1118 try_update_scene_state( struct lp_setup_context *setup )
1119 {
1120 static const float fake_const_buf[4];
1121 boolean new_scene = (setup->fs.stored == NULL);
1122 struct lp_scene *scene = setup->scene;
1123 unsigned i;
1124
1125 assert(scene);
1126
1127 if (setup->dirty & LP_SETUP_NEW_VIEWPORTS) {
1128 /*
1129 * Record new depth range state for changes due to viewport updates.
1130 *
1131 * TODO: Collapse the existing viewport and depth range information
1132 * into one structure, for access by JIT.
1133 */
1134 struct lp_jit_viewport *stored;
1135
1136 stored = (struct lp_jit_viewport *)
1137 lp_scene_alloc(scene, sizeof setup->viewports);
1138
1139 if (!stored) {
1140 assert(!new_scene);
1141 return FALSE;
1142 }
1143
1144 memcpy(stored, setup->viewports, sizeof setup->viewports);
1145
1146 setup->fs.current.jit_context.viewports = stored;
1147 setup->dirty |= LP_SETUP_NEW_FS;
1148 }
1149
1150 if(setup->dirty & LP_SETUP_NEW_BLEND_COLOR) {
1151 uint8_t *stored;
1152 float* fstored;
1153 unsigned i, j;
1154 unsigned size;
1155
1156 /* Alloc u8_blend_color (16 x i8) and f_blend_color (4 or 8 x f32) */
1157 size = 4 * 16 * sizeof(uint8_t);
1158 size += (LP_MAX_VECTOR_LENGTH / 4) * sizeof(float);
1159 stored = lp_scene_alloc_aligned(scene, size, LP_MIN_VECTOR_ALIGN);
1160
1161 if (!stored) {
1162 assert(!new_scene);
1163 return FALSE;
1164 }
1165
1166 /* Store floating point colour */
1167 fstored = (float*)(stored + 4*16);
1168 for (i = 0; i < (LP_MAX_VECTOR_LENGTH / 4); ++i) {
1169 fstored[i] = setup->blend_color.current.color[i % 4];
1170 }
1171
1172 /* smear each blend color component across 16 ubyte elements */
1173 for (i = 0; i < 4; ++i) {
1174 uint8_t c = float_to_ubyte(setup->blend_color.current.color[i]);
1175 for (j = 0; j < 16; ++j)
1176 stored[i*16 + j] = c;
1177 }
1178
1179 setup->blend_color.stored = stored;
1180 setup->fs.current.jit_context.u8_blend_color = stored;
1181 setup->fs.current.jit_context.f_blend_color = fstored;
1182 setup->dirty |= LP_SETUP_NEW_FS;
1183 }
1184
1185 if (setup->dirty & LP_SETUP_NEW_CONSTANTS) {
1186 for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
1187 struct pipe_resource *buffer = setup->constants[i].current.buffer;
1188 const unsigned current_size = MIN2(setup->constants[i].current.buffer_size,
1189 LP_MAX_TGSI_CONST_BUFFER_SIZE);
1190 const ubyte *current_data = NULL;
1191 int num_constants;
1192
1193 STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE);
1194
1195 if (buffer) {
1196 /* resource buffer */
1197 current_data = (ubyte *) llvmpipe_resource_data(buffer);
1198 }
1199 else if (setup->constants[i].current.user_buffer) {
1200 /* user-space buffer */
1201 current_data = (ubyte *) setup->constants[i].current.user_buffer;
1202 }
1203
1204 if (current_data) {
1205 current_data += setup->constants[i].current.buffer_offset;
1206
1207 /* TODO: copy only the actually used constants? */
1208
1209 if (setup->constants[i].stored_size != current_size ||
1210 !setup->constants[i].stored_data ||
1211 memcmp(setup->constants[i].stored_data,
1212 current_data,
1213 current_size) != 0) {
1214 void *stored;
1215
1216 stored = lp_scene_alloc(scene, current_size);
1217 if (!stored) {
1218 assert(!new_scene);
1219 return FALSE;
1220 }
1221
1222 memcpy(stored,
1223 current_data,
1224 current_size);
1225 setup->constants[i].stored_size = current_size;
1226 setup->constants[i].stored_data = stored;
1227 }
1228 setup->fs.current.jit_context.constants[i] =
1229 setup->constants[i].stored_data;
1230 }
1231 else {
1232 setup->constants[i].stored_size = 0;
1233 setup->constants[i].stored_data = NULL;
1234 setup->fs.current.jit_context.constants[i] = fake_const_buf;
1235 }
1236
1237 num_constants =
1238 DIV_ROUND_UP(setup->constants[i].stored_size, (sizeof(float) * 4));
1239 setup->fs.current.jit_context.num_constants[i] = num_constants;
1240 setup->dirty |= LP_SETUP_NEW_FS;
1241 }
1242 }
1243
1244 if (setup->dirty & LP_SETUP_NEW_SSBOS) {
1245 for (i = 0; i < ARRAY_SIZE(setup->ssbos); ++i) {
1246 struct pipe_resource *buffer = setup->ssbos[i].current.buffer;
1247 const ubyte *current_data = NULL;
1248
1249 if (!buffer)
1250 continue;
1251 /* resource buffer */
1252 current_data = (ubyte *) llvmpipe_resource_data(buffer);
1253 if (current_data) {
1254 current_data += setup->ssbos[i].current.buffer_offset;
1255
1256 setup->fs.current.jit_context.ssbos[i] = (const uint32_t *)current_data;
1257 setup->fs.current.jit_context.num_ssbos[i] = setup->ssbos[i].current.buffer_size;
1258 } else {
1259 setup->fs.current.jit_context.ssbos[i] = NULL;
1260 setup->fs.current.jit_context.num_ssbos[i] = 0;
1261 }
1262 setup->dirty |= LP_SETUP_NEW_FS;
1263 }
1264 }
1265 if (setup->dirty & LP_SETUP_NEW_FS) {
1266 if (!setup->fs.stored ||
1267 memcmp(setup->fs.stored,
1268 &setup->fs.current,
1269 sizeof setup->fs.current) != 0)
1270 {
1271 struct lp_rast_state *stored;
1272
1273 /* The fs state that's been stored in the scene is different from
1274 * the new, current state. So allocate a new lp_rast_state object
1275 * and append it to the bin's setup data buffer.
1276 */
1277 stored = (struct lp_rast_state *) lp_scene_alloc(scene, sizeof *stored);
1278 if (!stored) {
1279 assert(!new_scene);
1280 return FALSE;
1281 }
1282
1283 memcpy(stored,
1284 &setup->fs.current,
1285 sizeof setup->fs.current);
1286 setup->fs.stored = stored;
1287
1288 /* The scene now references the textures in the rasterization
1289 * state record. Note that now.
1290 */
1291 for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
1292 if (setup->fs.current_tex[i]) {
1293 if (!lp_scene_add_resource_reference(scene,
1294 setup->fs.current_tex[i],
1295 new_scene)) {
1296 assert(!new_scene);
1297 return FALSE;
1298 }
1299 }
1300 }
1301 }
1302 }
1303
1304 if (setup->dirty & LP_SETUP_NEW_SCISSOR) {
1305 unsigned i;
1306 for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
1307 setup->draw_regions[i] = setup->framebuffer;
1308 if (setup->scissor_test) {
1309 u_rect_possible_intersection(&setup->scissors[i],
1310 &setup->draw_regions[i]);
1311 }
1312 }
1313 }
1314
1315 setup->dirty = 0;
1316
1317 assert(setup->fs.stored);
1318 return TRUE;
1319 }
1320
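/**
 * Validate any dirty derived state and, if update_scene is TRUE, make sure
 * there is an active scene with that state stored into it, restarting the
 * scene once if the update runs out of scene memory.
 */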
1321 boolean
1322 lp_setup_update_state( struct lp_setup_context *setup,
1323 boolean update_scene )
1324 {
1325 /* Some of the 'draw' pipeline stages may have changed some driver state.
1326 * Make sure we've processed those state changes before anything else.
1327 *
1328 * XXX this is the only place where llvmpipe_context is used in the
1329 * setup code. This may get refactored/changed...
1330 */
1331 {
1332 struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
1333 if (lp->dirty) {
1334 llvmpipe_update_derived(lp);
1335 }
1336
1337 if (lp->setup->dirty) {
1338 llvmpipe_update_setup(lp);
1339 }
1340
1341 assert(setup->setup.variant);
1342
1343 /* Will probably need to move this somewhere else, just need
1344 * to know about vertex shader point size attribute.
1345 */
1346 setup->psize_slot = lp->psize_slot;
1347 setup->viewport_index_slot = lp->viewport_index_slot;
1348 setup->layer_slot = lp->layer_slot;
1349 setup->face_slot = lp->face_slot;
1350
1351 assert(lp->dirty == 0);
1352
1353 assert(lp->setup_variant.key.size ==
1354 setup->setup.variant->key.size);
1355
1356 assert(memcmp(&lp->setup_variant.key,
1357 &setup->setup.variant->key,
1358 setup->setup.variant->key.size) == 0);
1359 }
1360
1361 if (update_scene && setup->state != SETUP_ACTIVE) {
1362 if (!set_scene_state( setup, SETUP_ACTIVE, __FUNCTION__ ))
1363 return FALSE;
1364 }
1365
1366 /* Only call into update_scene_state() if we already have a
1367 * scene:
1368 */
1369 if (update_scene && setup->scene) {
1370 assert(setup->state == SETUP_ACTIVE);
1371
1372 if (try_update_scene_state(setup))
1373 return TRUE;
1374
1375 /* Update failed, try to restart the scene.
1376 *
1377 * Cannot call lp_setup_flush_and_restart() directly here
1378 * because of potential recursion.
1379 */
1380 if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
1381 return FALSE;
1382
1383 if (!set_scene_state(setup, SETUP_ACTIVE, __FUNCTION__))
1384 return FALSE;
1385
1386 if (!setup->scene)
1387 return FALSE;
1388
1389 return try_update_scene_state(setup);
1390 }
1391
1392 return TRUE;
1393 }
1394
1395
1396
1397 /* Only caller is lp_setup_vbuf_destroy()
1398 */
1399 void
1400 lp_setup_destroy( struct lp_setup_context *setup )
1401 {
1402 uint i;
1403
1404 lp_setup_reset( setup );
1405
1406 util_unreference_framebuffer_state(&setup->fb);
1407
1408 for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
1409 pipe_resource_reference(&setup->fs.current_tex[i], NULL);
1410 }
1411
1412 for (i = 0; i < ARRAY_SIZE(setup->constants); i++) {
1413 pipe_resource_reference(&setup->constants[i].current.buffer, NULL);
1414 }
1415
1416 for (i = 0; i < ARRAY_SIZE(setup->ssbos); i++) {
1417 pipe_resource_reference(&setup->ssbos[i].current.buffer, NULL);
1418 }
1419
1420 /* free the scenes in the 'empty' queue */
1421 for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
1422 struct lp_scene *scene = setup->scenes[i];
1423
1424 if (scene->fence)
1425 lp_fence_wait(scene->fence);
1426
1427 lp_scene_destroy(scene);
1428 }
1429
1430 lp_fence_reference(&setup->last_fence, NULL);
1431
1432 FREE( setup );
1433 }
1434
1435
1436 /**
1437 * Create a new primitive tiling engine. Plug it into the backend of
1438 * the draw module. Currently also creates a rasterizer to use with
1439 * it.
1440 */
1441 struct lp_setup_context *
1442 lp_setup_create( struct pipe_context *pipe,
1443 struct draw_context *draw )
1444 {
1445 struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
1446 struct lp_setup_context *setup;
1447 unsigned i;
1448
1449 setup = CALLOC_STRUCT(lp_setup_context);
1450 if (!setup) {
1451 goto no_setup;
1452 }
1453
1454 lp_setup_init_vbuf(setup);
1455
1456 /* Used only in update_state():
1457 */
1458 setup->pipe = pipe;
1459
1460
1461 setup->num_threads = screen->num_threads;
1462 setup->vbuf = draw_vbuf_stage(draw, &setup->base);
1463 if (!setup->vbuf) {
1464 goto no_vbuf;
1465 }
1466
1467 draw_set_rasterize_stage(draw, setup->vbuf);
1468 draw_set_render(draw, &setup->base);
1469
1470 /* create some empty scenes */
1471 for (i = 0; i < MAX_SCENES; i++) {
1472 setup->scenes[i] = lp_scene_create( pipe );
1473 if (!setup->scenes[i]) {
1474 goto no_scenes;
1475 }
1476 }
1477
1478 setup->triangle = first_triangle;
1479 setup->line = first_line;
1480 setup->point = first_point;
1481
1482 setup->dirty = ~0;
1483
1484 /* Initialize empty default fb correctly, so the rect is empty */
1485 setup->framebuffer.x1 = -1;
1486 setup->framebuffer.y1 = -1;
1487
1488 return setup;
1489
1490 no_scenes:
1491 for (i = 0; i < MAX_SCENES; i++) {
1492 if (setup->scenes[i]) {
1493 lp_scene_destroy(setup->scenes[i]);
1494 }
1495 }
1496
1497 setup->vbuf->destroy(setup->vbuf);
1498 no_vbuf:
1499 FREE(setup);
1500 no_setup:
1501 return NULL;
1502 }
1503
1504
1505 /**
1506 * Put a BeginQuery command into all bins.
1507 */
1508 void
1509 lp_setup_begin_query(struct lp_setup_context *setup,
1510 struct llvmpipe_query *pq)
1511 {
1512
1513 set_scene_state(setup, SETUP_ACTIVE, "begin_query");
1514
1515 if (!(pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1516 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1517 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
1518 pq->type == PIPE_QUERY_PIPELINE_STATISTICS))
1519 return;
1520
1521 /* init the query to its beginning state */
1522 assert(setup->active_binned_queries < LP_MAX_ACTIVE_BINNED_QUERIES);
1523 /* exceeding list size so just ignore the query */
1524 if (setup->active_binned_queries >= LP_MAX_ACTIVE_BINNED_QUERIES) {
1525 return;
1526 }
1527 assert(setup->active_queries[setup->active_binned_queries] == NULL);
1528 setup->active_queries[setup->active_binned_queries] = pq;
1529 setup->active_binned_queries++;
1530
1531 assert(setup->scene);
1532 if (setup->scene) {
1533 if (!lp_scene_bin_everywhere(setup->scene,
1534 LP_RAST_OP_BEGIN_QUERY,
1535 lp_rast_arg_query(pq))) {
1536
1537 if (!lp_setup_flush_and_restart(setup))
1538 return;
1539
1540 if (!lp_scene_bin_everywhere(setup->scene,
1541 LP_RAST_OP_BEGIN_QUERY,
1542 lp_rast_arg_query(pq))) {
1543 return;
1544 }
1545 }
1546 setup->scene->had_queries |= TRUE;
1547 }
1548 }
1549
1550
1551 /**
1552 * Put an EndQuery command into all bins.
1553 */
1554 void
1555 lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq)
1556 {
1557 set_scene_state(setup, SETUP_ACTIVE, "end_query");
1558
1559 assert(setup->scene);
1560 if (setup->scene) {
1561 /* pq->fence should be the fence of the *last* scene which
1562 * contributed to the query result.
1563 */
1564 lp_fence_reference(&pq->fence, setup->scene->fence);
1565
1566 if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1567 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1568 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
1569 pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
1570 pq->type == PIPE_QUERY_TIMESTAMP) {
1571 if (pq->type == PIPE_QUERY_TIMESTAMP &&
1572 !(setup->scene->tiles_x | setup->scene->tiles_y)) {
1573 /*
1574 * If there's a zero width/height framebuffer, there are no bins and
1575 * hence no rast task is ever run. So fill in something here instead.
1576 */
1577 pq->end[0] = os_time_get_nano();
1578 }
1579
1580 if (!lp_scene_bin_everywhere(setup->scene,
1581 LP_RAST_OP_END_QUERY,
1582 lp_rast_arg_query(pq))) {
1583 if (!lp_setup_flush_and_restart(setup))
1584 goto fail;
1585
1586 if (!lp_scene_bin_everywhere(setup->scene,
1587 LP_RAST_OP_END_QUERY,
1588 lp_rast_arg_query(pq))) {
1589 goto fail;
1590 }
1591 }
1592 setup->scene->had_queries |= TRUE;
1593 }
1594 }
1595 else {
1596 lp_fence_reference(&pq->fence, setup->last_fence);
1597 }
1598
1599 fail:
1600 /* Need to do this now and not earlier, since the query still needs to be
1601 * marked as active while binning it in case that causes a flush.
1602 */
1603 if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
1604 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1605 pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
1606 pq->type == PIPE_QUERY_PIPELINE_STATISTICS) {
1607 unsigned i;
1608
1609 /* remove from active binned query list */
1610 for (i = 0; i < setup->active_binned_queries; i++) {
1611 if (setup->active_queries[i] == pq)
1612 break;
1613 }
1614 assert(i < setup->active_binned_queries);
1615 if (i == setup->active_binned_queries)
1616 return;
1617 setup->active_binned_queries--;
1618 setup->active_queries[i] = setup->active_queries[setup->active_binned_queries];
1619 setup->active_queries[setup->active_binned_queries] = NULL;
1620 }
1621 }
1622
1623
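/**
 * Flush the current scene and immediately begin a new one with the same
 * state; used when binning a command fails for lack of scene memory.
 */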
1624 boolean
1625 lp_setup_flush_and_restart(struct lp_setup_context *setup)
1626 {
1627 if (0) debug_printf("%s\n", __FUNCTION__);
1628
1629 assert(setup->state == SETUP_ACTIVE);
1630
1631 if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
1632 return FALSE;
1633
1634 if (!lp_setup_update_state(setup, TRUE))
1635 return FALSE;
1636
1637 return TRUE;
1638 }
1639
1640