/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <limits.h>
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_rect.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_string.h"
#include "util/u_thread.h"
#include "util/u_memset.h"
#include "util/os_time.h"

#include "lp_scene_queue.h"
#include "lp_context.h"
#include "lp_debug.h"
#include "lp_fence.h"
#include "lp_perf.h"
#include "lp_query.h"
#include "lp_rast.h"
#include "lp_rast_priv.h"
#include "gallivm/lp_bld_format.h"
#include "gallivm/lp_bld_debug.h"
#include "lp_scene.h"
#include "lp_tex_sample.h"


#ifdef DEBUG
int jit_line = 0;
const struct lp_rast_state *jit_state = NULL;
const struct lp_rasterizer_task *jit_task = NULL;
#endif

/**
 * Begin rasterizing a scene.
 * Called once per scene by one thread.
 */
static void
lp_rast_begin( struct lp_rasterizer *rast,
               struct lp_scene *scene )
{
   rast->curr_scene = scene;

   LP_DBG(DEBUG_RAST, "%s\n", __FUNCTION__);

   lp_scene_begin_rasterization( scene );
   lp_scene_bin_iter_begin( scene );
}


static void
lp_rast_end( struct lp_rasterizer *rast )
{
   lp_scene_end_rasterization( rast->curr_scene );

   rast->curr_scene = NULL;
}


/**
 * Beginning rasterization of a tile.
 * \param x  window X position of the tile, in pixels
 * \param y  window Y position of the tile, in pixels
 */
static void
lp_rast_tile_begin(struct lp_rasterizer_task *task,
                   const struct cmd_bin *bin,
                   int x, int y)
{
   unsigned i;
   struct lp_scene *scene = task->scene;

   LP_DBG(DEBUG_RAST, "%s %d,%d\n", __FUNCTION__, x, y);

   task->bin = bin;
   task->x = x * TILE_SIZE;
   task->y = y * TILE_SIZE;
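   /*
    * Clamp the tile extent against the framebuffer size so the right/bottom
    * edge tiles only cover pixels that actually exist.  Illustrative
    * example (values assumed, not from any particular scene): with
    * TILE_SIZE = 64 and fb.width = 200, the tile at x = 3 starts at pixel
    * 192; since 192 + 64 > 200 the clamped width is 200 - 192 = 8, while an
    * interior tile keeps the full 64.
    */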
   task->width = TILE_SIZE + x * TILE_SIZE > task->scene->fb.width ?
                 task->scene->fb.width - x * TILE_SIZE : TILE_SIZE;
   task->height = TILE_SIZE + y * TILE_SIZE > task->scene->fb.height ?
                  task->scene->fb.height - y * TILE_SIZE : TILE_SIZE;

   task->thread_data.vis_counter = 0;
   task->thread_data.ps_invocations = 0;

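   /*
    * Point the per-task tile pointers at the mapped surfaces.  Each pointer
    * is a plain byte offset: map + stride * y + format_bytes * x.  As an
    * illustration (numbers assumed): for a 4-byte-per-pixel color format
    * with a stride of 1024 bytes, the tile at (task->x, task->y) = (64, 64)
    * starts at map + 1024 * 64 + 4 * 64 = map + 65792.
    */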
   for (i = 0; i < task->scene->fb.nr_cbufs; i++) {
      if (task->scene->fb.cbufs[i]) {
         task->color_tiles[i] = scene->cbufs[i].map +
                                scene->cbufs[i].stride * task->y +
                                scene->cbufs[i].format_bytes * task->x;
      }
   }
   if (task->scene->fb.zsbuf) {
      task->depth_tile = scene->zsbuf.map +
                         scene->zsbuf.stride * task->y +
                         scene->zsbuf.format_bytes * task->x;
   }
}


/**
 * Clear the rasterizer's current color tile.
 * This is a bin command called during bin processing.
 * Clear commands always clear all bound layers.
 */
static void
lp_rast_clear_color(struct lp_rasterizer_task *task,
                    const union lp_rast_cmd_arg arg)
{
   const struct lp_scene *scene = task->scene;
   unsigned cbuf = arg.clear_rb->cbuf;
   union util_color uc;
   enum pipe_format format;

   /* we never bin clear commands for non-existing buffers */
   assert(cbuf < scene->fb.nr_cbufs);
   assert(scene->fb.cbufs[cbuf]);

   format = scene->fb.cbufs[cbuf]->format;
   uc = arg.clear_rb->color_val;

   /*
    * this is pretty rough since we have target format (bunch of bytes...) here.
    * dump it as raw 4 dwords.
    */
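   /*
    * For instance (illustrative only, format and color assumed): a clear
    * color of (1, 0, 0, 1) on a PIPE_FORMAT_R8G8B8A8_UNORM target would
    * already have been packed into uc.ui[0] == 0xff0000ff before being
    * binned, so the debug output below is just those raw dwords, not floats.
    */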
   LP_DBG(DEBUG_RAST, "%s clear value (target format %d) raw 0x%x,0x%x,0x%x,0x%x\n",
          __FUNCTION__, format, uc.ui[0], uc.ui[1], uc.ui[2], uc.ui[3]);


   util_fill_box(scene->cbufs[cbuf].map,
                 format,
                 scene->cbufs[cbuf].stride,
                 scene->cbufs[cbuf].layer_stride,
                 task->x,
                 task->y,
                 0,
                 task->width,
                 task->height,
                 scene->fb_max_layer + 1,
                 &uc);

   /* this will increase for each rb which probably doesn't mean much */
   LP_COUNT(nr_color_tile_clear);
}


/**
 * Clear the rasterizer's current z/stencil tile.
 * This is a bin command called during bin processing.
 * Clear commands always clear all bound layers.
 */
static void
lp_rast_clear_zstencil(struct lp_rasterizer_task *task,
                       const union lp_rast_cmd_arg arg)
{
   const struct lp_scene *scene = task->scene;
   uint64_t clear_value64 = arg.clear_zstencil.value;
   uint64_t clear_mask64 = arg.clear_zstencil.mask;
   uint32_t clear_value = (uint32_t) clear_value64;
   uint32_t clear_mask = (uint32_t) clear_mask64;
   const unsigned height = task->height;
   const unsigned width = task->width;
   const unsigned dst_stride = scene->zsbuf.stride;
   uint8_t *dst;
   unsigned i, j;
   unsigned block_size;

   LP_DBG(DEBUG_RAST, "%s: value=0x%08x, mask=0x%08x\n",
          __FUNCTION__, clear_value, clear_mask);

   /*
    * Clear the area of the depth/stencil buffer matching this tile.
    */
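   /*
    * The mask allows depth-only or stencil-only clears of a combined
    * depth/stencil buffer.  For example (illustrative, assuming a
    * PIPE_FORMAT_Z24_UNORM_S8_UINT surface with Z in the low 24 bits): a
    * depth-only clear to 1.0 would arrive here as value = 0x00ffffff with
    * mask = 0x00ffffff, so the masked paths below rewrite the depth bits
    * while tmp = ~clear_mask & *row preserves the existing stencil byte.
    */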

   if (scene->fb.zsbuf) {
      unsigned layer;
      uint8_t *dst_layer = task->depth_tile;
      block_size = util_format_get_blocksize(scene->fb.zsbuf->format);

      clear_value &= clear_mask;

      for (layer = 0; layer <= scene->fb_max_layer; layer++) {
         dst = dst_layer;

         switch (block_size) {
         case 1:
            assert(clear_mask == 0xff);
            for (i = 0; i < height; i++) {
               uint8_t *row = (uint8_t *)dst;
               memset(row, (uint8_t) clear_value, width);
               dst += dst_stride;
            }
            break;
         case 2:
            if (clear_mask == 0xffff) {
               for (i = 0; i < height; i++) {
                  uint16_t *row = (uint16_t *)dst;
                  for (j = 0; j < width; j++)
                     *row++ = (uint16_t) clear_value;
                  dst += dst_stride;
               }
            }
            else {
               for (i = 0; i < height; i++) {
                  uint16_t *row = (uint16_t *)dst;
                  for (j = 0; j < width; j++) {
                     uint16_t tmp = ~clear_mask & *row;
                     *row++ = clear_value | tmp;
                  }
                  dst += dst_stride;
               }
            }
            break;
         case 4:
            if (clear_mask == 0xffffffff) {
               for (i = 0; i < height; i++) {
                  util_memset32(dst, clear_value, width);
                  dst += dst_stride;
               }
            }
            else {
               for (i = 0; i < height; i++) {
                  uint32_t *row = (uint32_t *)dst;
                  for (j = 0; j < width; j++) {
                     uint32_t tmp = ~clear_mask & *row;
                     *row++ = clear_value | tmp;
                  }
                  dst += dst_stride;
               }
            }
            break;
         case 8:
            clear_value64 &= clear_mask64;
            if (clear_mask64 == 0xffffffffffffffffULL) {
               for (i = 0; i < height; i++) {
                  uint64_t *row = (uint64_t *)dst;
                  for (j = 0; j < width; j++)
                     *row++ = clear_value64;
                  dst += dst_stride;
               }
            }
            else {
               for (i = 0; i < height; i++) {
                  uint64_t *row = (uint64_t *)dst;
                  for (j = 0; j < width; j++) {
                     uint64_t tmp = ~clear_mask64 & *row;
                     *row++ = clear_value64 | tmp;
                  }
                  dst += dst_stride;
               }
            }
            break;

         default:
            assert(0);
            break;
         }
         dst_layer += scene->zsbuf.layer_stride;
      }
   }
}



/**
 * Run the shader on all blocks in a tile. This is used when a tile is
 * completely contained inside a triangle.
 * This is a bin command called during bin processing.
 */
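/*
 * Note: this path uses the RAST_WHOLE variant of the generated fragment
 * shader, since every 4x4 block is known to be fully covered.  Blocks that
 * are only partially covered by a triangle go through
 * lp_rast_shade_quads_mask() below instead, which uses the RAST_EDGE_TEST
 * variant together with a per-block coverage mask.
 */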
static void
lp_rast_shade_tile(struct lp_rasterizer_task *task,
                   const union lp_rast_cmd_arg arg)
{
   const struct lp_scene *scene = task->scene;
   const struct lp_rast_shader_inputs *inputs = arg.shade_tile;
   const struct lp_rast_state *state;
   struct lp_fragment_shader_variant *variant;
   const unsigned tile_x = task->x, tile_y = task->y;
   unsigned x, y;

   if (inputs->disable) {
      /* This command was partially binned and has been disabled */
      return;
   }

   LP_DBG(DEBUG_RAST, "%s\n", __FUNCTION__);

   state = task->state;
   assert(state);
   if (!state) {
      return;
   }
   variant = state->variant;

   /* render the whole 64x64 tile in 4x4 chunks */
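   /*
    * Illustrative sizing (TILE_SIZE is 64 here, per the comment above): a
    * fully covered interior tile is walked as 16 x 16 = 256 blocks of 4x4
    * pixels, and each block is handed to the RAST_WHOLE shader variant with
    * the full 0xffff coverage mask (all 16 pixels enabled); edge tiles
    * simply loop over the clamped task->width/height instead.
    */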
   for (y = 0; y < task->height; y += 4){
      for (x = 0; x < task->width; x += 4) {
         uint8_t *color[PIPE_MAX_COLOR_BUFS];
         unsigned stride[PIPE_MAX_COLOR_BUFS];
         uint8_t *depth = NULL;
         unsigned depth_stride = 0;
         unsigned i;

         /* color buffer */
         for (i = 0; i < scene->fb.nr_cbufs; i++){
            if (scene->fb.cbufs[i]) {
               stride[i] = scene->cbufs[i].stride;
               color[i] = lp_rast_get_color_block_pointer(task, i, tile_x + x,
                                                          tile_y + y, inputs->layer);
            }
            else {
               stride[i] = 0;
               color[i] = NULL;
            }
         }

         /* depth buffer */
         if (scene->zsbuf.map) {
            depth = lp_rast_get_depth_block_pointer(task, tile_x + x,
                                                    tile_y + y, inputs->layer);
            depth_stride = scene->zsbuf.stride;
         }

         /* Propagate non-interpolated raster state. */
         task->thread_data.raster_state.viewport_index = inputs->viewport_index;

         /* run shader on 4x4 block */
         BEGIN_JIT_CALL(state, task);
         variant->jit_function[RAST_WHOLE]( &state->jit_context,
                                            tile_x + x, tile_y + y,
                                            inputs->frontfacing,
                                            GET_A0(inputs),
                                            GET_DADX(inputs),
                                            GET_DADY(inputs),
                                            color,
                                            depth,
                                            0xffff,
                                            &task->thread_data,
                                            stride,
                                            depth_stride);
         END_JIT_CALL();
      }
   }
}


/**
 * Run the shader on all blocks in a tile. This is used when a tile is
 * completely contained inside a triangle, and the shader is opaque.
 * This is a bin command called during bin processing.
 */
static void
lp_rast_shade_tile_opaque(struct lp_rasterizer_task *task,
                          const union lp_rast_cmd_arg arg)
{
   LP_DBG(DEBUG_RAST, "%s\n", __FUNCTION__);

   assert(task->state);
   if (!task->state) {
      return;
   }

   lp_rast_shade_tile(task, arg);
}


/**
 * Compute shading for a 4x4 block of pixels inside a triangle.
 * This is a bin command called during bin processing.
 * \param x  X position of the 4x4 block in window coords
 * \param y  Y position of the 4x4 block in window coords
 * \param mask  coverage mask for the 4x4 block (0xffff = all pixels covered)
 */
void
lp_rast_shade_quads_mask(struct lp_rasterizer_task *task,
                         const struct lp_rast_shader_inputs *inputs,
                         unsigned x, unsigned y,
                         unsigned mask)
{
   const struct lp_rast_state *state = task->state;
   struct lp_fragment_shader_variant *variant = state->variant;
   const struct lp_scene *scene = task->scene;
   uint8_t *color[PIPE_MAX_COLOR_BUFS];
   unsigned stride[PIPE_MAX_COLOR_BUFS];
   uint8_t *depth = NULL;
   unsigned depth_stride = 0;
   unsigned i;

   assert(state);

   /* Sanity checks */
   assert(x < scene->tiles_x * TILE_SIZE);
   assert(y < scene->tiles_y * TILE_SIZE);
   assert(x % TILE_VECTOR_WIDTH == 0);
   assert(y % TILE_VECTOR_HEIGHT == 0);

   assert((x % 4) == 0);
   assert((y % 4) == 0);

   /* color buffer */
   for (i = 0; i < scene->fb.nr_cbufs; i++) {
      if (scene->fb.cbufs[i]) {
         stride[i] = scene->cbufs[i].stride;
         color[i] = lp_rast_get_color_block_pointer(task, i, x, y,
                                                    inputs->layer);
      }
      else {
         stride[i] = 0;
         color[i] = NULL;
      }
   }

   /* depth buffer */
   if (scene->zsbuf.map) {
      depth_stride = scene->zsbuf.stride;
      depth = lp_rast_get_depth_block_pointer(task, x, y, inputs->layer);
   }

   assert(lp_check_alignment(state->jit_context.u8_blend_color, 16));

   /*
    * The rasterizer may produce fragments outside our allocated 4x4 blocks,
    * hence we need to filter them out here.
    */
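   /*
    * Worked example of the check below (framebuffer width assumed): with a
    * 70-pixel-wide framebuffer the right-edge tile only has task->width = 6
    * valid pixels, so blocks at local x offsets 0 and 4 are shaded while a
    * block the rasterizer emits at local offset 8 is silently dropped.
    */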
   if ((x % TILE_SIZE) < task->width && (y % TILE_SIZE) < task->height) {
      /* Propagate non-interpolated raster state. */
      task->thread_data.raster_state.viewport_index = inputs->viewport_index;

      /* run shader on 4x4 block */
      BEGIN_JIT_CALL(state, task);
      variant->jit_function[RAST_EDGE_TEST](&state->jit_context,
                                            x, y,
                                            inputs->frontfacing,
                                            GET_A0(inputs),
                                            GET_DADX(inputs),
                                            GET_DADY(inputs),
                                            color,
                                            depth,
                                            mask,
                                            &task->thread_data,
                                            stride,
                                            depth_stride);
      END_JIT_CALL();
   }
}



/**
 * Begin a new occlusion query.
 * This is a bin command put in all bins.
 * Called per thread.
 */
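/*
 * How the per-thread bookkeeping works (numbers below are illustrative):
 * begin snapshots the thread's running counter, e.g. start[t] = 100 when the
 * occlusion query begins; if vis_counter has reached 164 by the time the
 * matching end command (or the end of the tile) is processed, the query
 * accumulates end[t] += 164 - 100 = 64 passing samples for this thread.
 */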
static void
lp_rast_begin_query(struct lp_rasterizer_task *task,
                    const union lp_rast_cmd_arg arg)
{
   struct llvmpipe_query *pq = arg.query_obj;

   switch (pq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      pq->start[task->thread_index] = task->thread_data.vis_counter;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      pq->start[task->thread_index] = task->thread_data.ps_invocations;
      break;
   default:
      assert(0);
      break;
   }
}


/**
 * End the current occlusion query.
 * This is a bin command put in all bins.
 * Called per thread.
 */
static void
lp_rast_end_query(struct lp_rasterizer_task *task,
                  const union lp_rast_cmd_arg arg)
{
   struct llvmpipe_query *pq = arg.query_obj;

   switch (pq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      pq->end[task->thread_index] +=
         task->thread_data.vis_counter - pq->start[task->thread_index];
      pq->start[task->thread_index] = 0;
      break;
   case PIPE_QUERY_TIMESTAMP:
      pq->end[task->thread_index] = os_time_get_nano();
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      pq->end[task->thread_index] +=
         task->thread_data.ps_invocations - pq->start[task->thread_index];
      pq->start[task->thread_index] = 0;
      break;
   default:
      assert(0);
      break;
   }
}


void
lp_rast_set_state(struct lp_rasterizer_task *task,
                  const union lp_rast_cmd_arg arg)
{
   task->state = arg.state;
}



/**
 * Called when we're done writing to a color tile.
 */
static void
lp_rast_tile_end(struct lp_rasterizer_task *task)
{
   unsigned i;

   for (i = 0; i < task->scene->num_active_queries; ++i) {
      lp_rast_end_query(task, lp_rast_arg_query(task->scene->active_queries[i]));
   }

   /* debug */
   memset(task->color_tiles, 0, sizeof(task->color_tiles));
   task->depth_tile = NULL;

   task->bin = NULL;
}

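/*
 * Jump table used by do_rasterize_bin() below: each binned command byte is
 * used directly as an index, so the entries here must stay in the same order
 * as the corresponding LP_RAST_OP_* opcodes defined in lp_rast.h.
 */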
static lp_rast_cmd_func dispatch[LP_RAST_OP_MAX] =
{
   lp_rast_clear_color,
   lp_rast_clear_zstencil,
   lp_rast_triangle_1,
   lp_rast_triangle_2,
   lp_rast_triangle_3,
   lp_rast_triangle_4,
   lp_rast_triangle_5,
   lp_rast_triangle_6,
   lp_rast_triangle_7,
   lp_rast_triangle_8,
   lp_rast_triangle_3_4,
   lp_rast_triangle_3_16,
   lp_rast_triangle_4_16,
   lp_rast_shade_tile,
   lp_rast_shade_tile_opaque,
   lp_rast_begin_query,
   lp_rast_end_query,
   lp_rast_set_state,
   lp_rast_triangle_32_1,
   lp_rast_triangle_32_2,
   lp_rast_triangle_32_3,
   lp_rast_triangle_32_4,
   lp_rast_triangle_32_5,
   lp_rast_triangle_32_6,
   lp_rast_triangle_32_7,
   lp_rast_triangle_32_8,
   lp_rast_triangle_32_3_4,
   lp_rast_triangle_32_3_16,
   lp_rast_triangle_32_4_16
};


static void
do_rasterize_bin(struct lp_rasterizer_task *task,
                 const struct cmd_bin *bin,
                 int x, int y)
{
   const struct cmd_block *block;
   unsigned k;

   if (0)
      lp_debug_bin(bin, x, y);

   for (block = bin->head; block; block = block->next) {
      for (k = 0; k < block->count; k++) {
         dispatch[block->cmd[k]]( task, block->arg[k] );
      }
   }
}



/**
 * Rasterize commands for a single bin.
 * \param x, y position of the bin's tile in the framebuffer
 * Must be called between lp_rast_begin() and lp_rast_end().
 * Called per thread.
 */
static void
rasterize_bin(struct lp_rasterizer_task *task,
              const struct cmd_bin *bin, int x, int y )
{
   lp_rast_tile_begin( task, bin, x, y );

   do_rasterize_bin(task, bin, x, y);

   lp_rast_tile_end(task);

#ifdef DEBUG
   /* Debug/Perf flags:
    */
   if (bin->head->count == 1) {
      if (bin->head->cmd[0] == LP_RAST_OP_SHADE_TILE_OPAQUE)
         LP_COUNT(nr_pure_shade_opaque_64);
      else if (bin->head->cmd[0] == LP_RAST_OP_SHADE_TILE)
         LP_COUNT(nr_pure_shade_64);
   }
#endif
}


/* An empty bin is one that just loads the contents of the tile and
 * stores them again unchanged.  This typically happens when bins have
 * been flushed for some reason in the middle of a frame, or when
 * incremental updates are being made to a render target.
 *
 * Try to avoid doing pointless work in this case.
 */
static boolean
is_empty_bin( const struct cmd_bin *bin )
{
   return bin->head == NULL;
}


/**
 * Rasterize/execute all bins within a scene.
 * Called per thread.
 */
static void
rasterize_scene(struct lp_rasterizer_task *task,
                struct lp_scene *scene)
{
   task->scene = scene;

   /* Clear the cache tags.  This is not always strictly necessary, but it is
    * simpler for now. */
#if LP_USE_TEXTURE_CACHE
   memset(task->thread_data.cache->cache_tags, 0,
          sizeof(task->thread_data.cache->cache_tags));
#if LP_BUILD_FORMAT_CACHE_DEBUG
   task->thread_data.cache->cache_access_total = 0;
   task->thread_data.cache->cache_access_miss = 0;
#endif
#endif

   if (!task->rast->no_rast) {
      /* loop over scene bins, rasterize each */
      {
         struct cmd_bin *bin;
         int i, j;

         assert(scene);
         while ((bin = lp_scene_bin_iter_next(scene, &i, &j))) {
            if (!is_empty_bin( bin ))
               rasterize_bin(task, bin, i, j);
         }
      }
   }


#if LP_BUILD_FORMAT_CACHE_DEBUG
   {
      uint64_t total, miss;
      total = task->thread_data.cache->cache_access_total;
      miss = task->thread_data.cache->cache_access_miss;
      if (total) {
         debug_printf("thread %d cache access %llu miss %llu hit rate %f\n",
                      task->thread_index, (long long unsigned)total,
                      (long long unsigned)miss,
                      (float)(total - miss)/(float)total);
      }
   }
#endif

   if (scene->fence) {
      lp_fence_signal(scene->fence);
   }

   task->scene = NULL;
}


/**
 * Called by setup module when it has something for us to render.
 */
void
lp_rast_queue_scene( struct lp_rasterizer *rast,
                     struct lp_scene *scene)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   if (rast->num_threads == 0) {
      /* no threading */
      unsigned fpstate = util_fpstate_get();

      /* Make sure that denorms are treated like zeros. This is
       * the behavior required by D3D10. OpenGL doesn't care.
       */
      util_fpstate_set_denorms_to_zero(fpstate);

      lp_rast_begin( rast, scene );

      rasterize_scene( &rast->tasks[0], scene );

      lp_rast_end( rast );

      util_fpstate_set(fpstate);

      rast->curr_scene = NULL;
   }
   else {
      /* threaded rendering! */
      unsigned i;

      lp_scene_enqueue( rast->full_scenes, scene );

      /* signal the threads that there's work to do */
      for (i = 0; i < rast->num_threads; i++) {
         pipe_semaphore_signal(&rast->tasks[i].work_ready);
      }
   }

   LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
}


void
lp_rast_finish( struct lp_rasterizer *rast )
{
   if (rast->num_threads == 0) {
      /* nothing to do */
   }
   else {
      int i;

      /* wait for work to complete */
      for (i = 0; i < rast->num_threads; i++) {
         pipe_semaphore_wait(&rast->tasks[i].work_done);
      }
   }
}


/**
 * This is the thread's main entrypoint.
 * It's a simple loop:
 *   1. wait for work
 *   2. do work
 *   3. signal that we're done
 */
static int
thread_function(void *init_data)
{
   struct lp_rasterizer_task *task = (struct lp_rasterizer_task *) init_data;
   struct lp_rasterizer *rast = task->rast;
   boolean debug = false;
   char thread_name[16];
   unsigned fpstate;

   snprintf(thread_name, sizeof thread_name, "llvmpipe-%u", task->thread_index);
   u_thread_setname(thread_name);

   /* Make sure that denorms are treated like zeros. This is
    * the behavior required by D3D10. OpenGL doesn't care.
    */
   fpstate = util_fpstate_get();
   util_fpstate_set_denorms_to_zero(fpstate);

   while (1) {
      /* wait for work */
      if (debug)
         debug_printf("thread %d waiting for work\n", task->thread_index);
      pipe_semaphore_wait(&task->work_ready);

      if (rast->exit_flag)
         break;

      if (task->thread_index == 0) {
         /* thread[0]:
          *  - get next scene to rasterize
          *  - map the framebuffer surfaces
          */
         lp_rast_begin( rast,
                        lp_scene_dequeue( rast->full_scenes, TRUE ) );
      }

      /* Wait for all threads to get here so that threads[1+] don't
       * get a null rast->curr_scene pointer.
       */
      util_barrier_wait( &rast->barrier );

      /* do work */
      if (debug)
         debug_printf("thread %d doing work\n", task->thread_index);

      rasterize_scene(task,
                      rast->curr_scene);

      /* wait for all threads to finish with this scene */
      util_barrier_wait( &rast->barrier );

      /* XXX: shouldn't be necessary:
       */
      if (task->thread_index == 0) {
         lp_rast_end( rast );
      }

      /* signal done with work */
      if (debug)
         debug_printf("thread %d done working\n", task->thread_index);

      pipe_semaphore_signal(&task->work_done);
   }

#ifdef _WIN32
   pipe_semaphore_signal(&task->work_done);
#endif

   return 0;
}


/**
 * Initialize semaphores and spawn the threads.
 */
static void
create_rast_threads(struct lp_rasterizer *rast)
{
   unsigned i;

   /* NOTE: if num_threads is zero, we won't use any threads */
   for (i = 0; i < rast->num_threads; i++) {
      pipe_semaphore_init(&rast->tasks[i].work_ready, 0);
      pipe_semaphore_init(&rast->tasks[i].work_done, 0);
      rast->threads[i] = u_thread_create(thread_function,
                                         (void *) &rast->tasks[i]);
      if (!rast->threads[i]) {
         rast->num_threads = i; /* previous thread is max */
         break;
      }
   }
}



/**
 * Create new lp_rasterizer.  If num_threads is zero, don't create any
 * new threads, do rendering synchronously.
 * \param num_threads number of rasterizer threads to create
 */
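/*
 * Typical call sequence (a sketch of how llvmpipe's setup code drives this
 * module; the thread count of 4 is just an example):
 *
 *    struct lp_rasterizer *rast = lp_rast_create(4);
 *    ...
 *    lp_rast_queue_scene(rast, scene);   // hand a binned scene to the threads
 *    lp_rast_finish(rast);               // block until all threads are done
 *    ...
 *    lp_rast_destroy(rast);
 */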
struct lp_rasterizer *
lp_rast_create( unsigned num_threads )
{
   struct lp_rasterizer *rast;
   unsigned i;

   rast = CALLOC_STRUCT(lp_rasterizer);
   if (!rast) {
      goto no_rast;
   }

   rast->full_scenes = lp_scene_queue_create();
   if (!rast->full_scenes) {
      goto no_full_scenes;
   }

   for (i = 0; i < MAX2(1, num_threads); i++) {
      struct lp_rasterizer_task *task = &rast->tasks[i];
      task->rast = rast;
      task->thread_index = i;
      task->thread_data.cache = align_malloc(sizeof(struct lp_build_format_cache),
                                             16);
      if (!task->thread_data.cache) {
         goto no_thread_data_cache;
      }
   }

   rast->num_threads = num_threads;

   rast->no_rast = debug_get_bool_option("LP_NO_RAST", FALSE);

   create_rast_threads(rast);

   /* for synchronizing rasterization threads */
   if (rast->num_threads > 0) {
      util_barrier_init( &rast->barrier, rast->num_threads );
   }

   memset(lp_dummy_tile, 0, sizeof lp_dummy_tile);

   return rast;

no_thread_data_cache:
   for (i = 0; i < MAX2(1, rast->num_threads); i++) {
      if (rast->tasks[i].thread_data.cache) {
         align_free(rast->tasks[i].thread_data.cache);
      }
   }

   lp_scene_queue_destroy(rast->full_scenes);
no_full_scenes:
   FREE(rast);
no_rast:
   return NULL;
}


/* Shutdown:
 */
void lp_rast_destroy( struct lp_rasterizer *rast )
{
   unsigned i;

   /* Set exit_flag and signal each thread's work_ready semaphore.
    * Each thread will be woken up, notice that the exit_flag is set and
    * break out of its main loop.  The thread will then exit.
    */
   rast->exit_flag = TRUE;
   for (i = 0; i < rast->num_threads; i++) {
      pipe_semaphore_signal(&rast->tasks[i].work_ready);
   }

   /* Wait for threads to terminate before cleaning up per-thread data.
    * We don't actually call pipe_thread_wait to avoid dead lock on Windows
    * per https://bugs.freedesktop.org/show_bug.cgi?id=76252 */
   for (i = 0; i < rast->num_threads; i++) {
#ifdef _WIN32
      pipe_semaphore_wait(&rast->tasks[i].work_done);
#else
      thrd_join(rast->threads[i], NULL);
#endif
   }

   /* Clean up per-thread data */
   for (i = 0; i < rast->num_threads; i++) {
      pipe_semaphore_destroy(&rast->tasks[i].work_ready);
      pipe_semaphore_destroy(&rast->tasks[i].work_done);
   }
   for (i = 0; i < MAX2(1, rast->num_threads); i++) {
      align_free(rast->tasks[i].thread_data.cache);
   }

   /* for synchronizing rasterization threads */
   if (rast->num_threads > 0) {
      util_barrier_destroy( &rast->barrier );
   }

   lp_scene_queue_destroy(rast->full_scenes);

   FREE(rast);
}