7083029e458a644c63ef7d5664e045a365fe3a13
1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "util/u_cpu_detect.h"
32 #include "lp_bin_queue.h"
36 #include "lp_rast_priv.h"
37 #include "lp_tile_soa.h"
38 #include "lp_bld_debug.h"
44 * Called by rasterization threads to get the next chunk of work.
45 * We use a lock to make sure that all the threads get the same bins.
47 static struct lp_bins
*
48 get_next_full_bin( struct lp_rasterizer
*rast
)
50 pipe_mutex_lock( rast
->get_bin_mutex
);
51 if (!rast
->curr_bins
) {
52 /* this will wait until there's something in the queue */
53 rast
->curr_bins
= lp_bins_dequeue( rast
->full_bins
);
54 rast
->release_count
= 0;
56 lp_bin_iter_begin( rast
->curr_bins
);
58 pipe_mutex_unlock( rast
->get_bin_mutex
);
59 return rast
->curr_bins
;
64 * Called by rasterization threads after they've finished with
65 * the current bin. When all threads have called this, we reset
66 * the bin and put it into the 'empty bins' queue.
69 release_current_bin( struct lp_rasterizer
*rast
)
71 pipe_mutex_lock( rast
->get_bin_mutex
);
72 rast
->release_count
++;
73 if (rast
->release_count
== rast
->num_threads
) {
74 assert(rast
->curr_bins
);
75 lp_reset_bins( rast
->curr_bins
);
76 lp_bins_enqueue( rast
->empty_bins
, rast
->curr_bins
);
77 rast
->curr_bins
= NULL
;
79 pipe_mutex_unlock( rast
->get_bin_mutex
);
85 * Begin the rasterization phase.
86 * Map the framebuffer surfaces. Initialize the 'rast' state.
89 lp_rast_begin( struct lp_rasterizer
*rast
,
90 struct pipe_surface
*cbuf
,
91 struct pipe_surface
*zsbuf
,
93 boolean write_zstencil
,
97 struct pipe_screen
*screen
= rast
->screen
;
99 LP_DBG(DEBUG_RAST
, "%s %dx%d\n", __FUNCTION__
, width
, height
);
101 pipe_surface_reference(&rast
->state
.cbuf
, cbuf
);
102 pipe_surface_reference(&rast
->state
.zsbuf
, zsbuf
);
105 rast
->height
= height
;
106 rast
->state
.write_zstencil
= write_zstencil
;
107 rast
->state
.write_color
= write_color
;
109 rast
->check_for_clipped_tiles
= (width
% TILE_SIZE
!= 0 ||
110 height
% TILE_SIZE
!= 0);
113 rast
->cbuf_transfer
= screen
->get_tex_transfer(rast
->screen
,
118 PIPE_TRANSFER_READ_WRITE
,
119 0, 0, width
, height
);
120 if (!rast
->cbuf_transfer
)
123 rast
->cbuf_map
= screen
->transfer_map(rast
->screen
,
124 rast
->cbuf_transfer
);
130 rast
->zsbuf_transfer
= screen
->get_tex_transfer(rast
->screen
,
135 PIPE_TRANSFER_READ_WRITE
,
136 0, 0, width
, height
);
137 if (!rast
->zsbuf_transfer
)
140 rast
->zsbuf_map
= screen
->transfer_map(rast
->screen
,
141 rast
->zsbuf_transfer
);
142 if (!rast
->zsbuf_map
)
151 * Finish the rasterization phase.
152 * Unmap framebuffer surfaces.
155 lp_rast_end( struct lp_rasterizer
*rast
)
157 struct pipe_screen
*screen
= rast
->screen
;
160 screen
->transfer_unmap(screen
, rast
->cbuf_transfer
);
163 screen
->transfer_unmap(screen
, rast
->zsbuf_transfer
);
165 if (rast
->cbuf_transfer
)
166 screen
->tex_transfer_destroy(rast
->cbuf_transfer
);
168 if (rast
->zsbuf_transfer
)
169 screen
->tex_transfer_destroy(rast
->zsbuf_transfer
);
171 rast
->cbuf_transfer
= NULL
;
172 rast
->zsbuf_transfer
= NULL
;
173 rast
->cbuf_map
= NULL
;
174 rast
->zsbuf_map
= NULL
;
179 * Begining rasterization of a tile.
180 * \param x window X position of the tile, in pixels
181 * \param y window Y position of the tile, in pixels
184 lp_rast_start_tile( struct lp_rasterizer
*rast
,
185 unsigned thread_index
,
186 unsigned x
, unsigned y
)
188 LP_DBG(DEBUG_RAST
, "%s %d,%d\n", __FUNCTION__
, x
, y
);
190 rast
->tasks
[thread_index
].x
= x
;
191 rast
->tasks
[thread_index
].y
= y
;
196 * Clear the rasterizer's current color tile.
197 * This is a bin command called during bin processing.
199 void lp_rast_clear_color( struct lp_rasterizer
*rast
,
200 unsigned thread_index
,
201 const union lp_rast_cmd_arg arg
)
203 const uint8_t *clear_color
= arg
.clear_color
;
204 uint8_t *color_tile
= rast
->tasks
[thread_index
].tile
.color
;
206 LP_DBG(DEBUG_RAST
, "%s 0x%x,0x%x,0x%x,0x%x\n", __FUNCTION__
,
212 if (clear_color
[0] == clear_color
[1] &&
213 clear_color
[1] == clear_color
[2] &&
214 clear_color
[2] == clear_color
[3]) {
215 memset(color_tile
, clear_color
[0], TILE_SIZE
* TILE_SIZE
* 4);
219 for (y
= 0; y
< TILE_SIZE
; y
++)
220 for (x
= 0; x
< TILE_SIZE
; x
++)
221 for (chan
= 0; chan
< 4; ++chan
)
222 TILE_PIXEL(color_tile
, x
, y
, chan
) = clear_color
[chan
];
228 * Clear the rasterizer's current z/stencil tile.
229 * This is a bin command called during bin processing.
231 void lp_rast_clear_zstencil( struct lp_rasterizer
*rast
,
232 unsigned thread_index
,
233 const union lp_rast_cmd_arg arg
)
236 uint32_t *depth_tile
= rast
->tasks
[thread_index
].tile
.depth
;
238 LP_DBG(DEBUG_RAST
, "%s 0x%x\n", __FUNCTION__
, arg
.clear_zstencil
);
240 for (i
= 0; i
< TILE_SIZE
; i
++)
241 for (j
= 0; j
< TILE_SIZE
; j
++)
242 depth_tile
[i
*TILE_SIZE
+ j
] = arg
.clear_zstencil
;
247 * Load tile color from the framebuffer surface.
248 * This is a bin command called during bin processing.
250 void lp_rast_load_color( struct lp_rasterizer
*rast
,
251 unsigned thread_index
,
252 const union lp_rast_cmd_arg arg
)
254 LP_DBG(DEBUG_RAST
, "%s\n", __FUNCTION__
);
256 /* call u_tile func to load colors from surface */
261 * Load tile z/stencil from the framebuffer surface.
262 * This is a bin command called during bin processing.
264 void lp_rast_load_zstencil( struct lp_rasterizer
*rast
,
265 unsigned thread_index
,
266 const union lp_rast_cmd_arg arg
)
268 LP_DBG(DEBUG_RAST
, "%s\n", __FUNCTION__
);
270 /* call u_tile func to load depth (and stencil?) from surface */
274 void lp_rast_set_state( struct lp_rasterizer
*rast
,
275 unsigned thread_index
,
276 const union lp_rast_cmd_arg arg
)
278 const struct lp_rast_state
*state
= arg
.set_state
;
280 LP_DBG(DEBUG_RAST
, "%s %p\n", __FUNCTION__
, (void *) state
);
282 /* just set the current state pointer for this rasterizer */
283 rast
->tasks
[thread_index
].current_state
= state
;
292 * Run the shader on all blocks in a tile. This is used when a tile is
293 * completely contained inside a triangle.
294 * This is a bin command called during bin processing.
296 void lp_rast_shade_tile( struct lp_rasterizer
*rast
,
297 unsigned thread_index
,
298 const union lp_rast_cmd_arg arg
)
300 const struct lp_rast_shader_inputs
*inputs
= arg
.shade_tile
;
301 const unsigned tile_x
= rast
->tasks
[thread_index
].x
;
302 const unsigned tile_y
= rast
->tasks
[thread_index
].y
;
303 const unsigned mask
= ~0;
306 LP_DBG(DEBUG_RAST
, "%s\n", __FUNCTION__
);
308 /* Use the existing preference for 4x4 (four quads) shading:
310 for (y
= 0; y
< TILE_SIZE
; y
+= 4)
311 for (x
= 0; x
< TILE_SIZE
; x
+= 4)
312 lp_rast_shade_quads( rast
,
/**
 * Compute shading for a 4x4 block of pixels.
 * This is a bin command called during bin processing.
 *
 * NOTE(review): this definition is incomplete in the extracted text.
 * The end of the parameter list (presumably the per-pixel coverage mask),
 * several local declarations (color/depth pointers, ix/iy, block_offset),
 * the preprocessor lines that appear to select between the loop-based and
 * unrolled mask-expansion variants, and the tail of the jit_function
 * argument list were all lost. The surviving fragments are kept verbatim
 * below — reconstruct from the upstream file before compiling.
 */
void lp_rast_shade_quads( struct lp_rasterizer *rast,
                          unsigned thread_index,
                          const struct lp_rast_shader_inputs *inputs,
                          unsigned x, unsigned y,
   /* per-thread shading state and destination tile */
   const struct lp_rast_state *state = rast->tasks[thread_index].current_state;
   struct lp_rast_tile *tile = &rast->tasks[thread_index].tile;
   uint32_t ALIGN16_ATTRIB masks[2][2][2][2];

   /* x/y must be aligned to the swizzled tile vector layout */
   assert(x % TILE_VECTOR_WIDTH == 0);
   assert(y % TILE_VECTOR_HEIGHT == 0);

   /* mask: the rasterizer wants to treat pixels in 4x4 blocks, but
    * the pixel shader wants to swizzle them into 4 2x2 quads.
    *
    * Additionally, the pixel shader wants masks as full dword ~0,
    * while the rasterizer wants to pack per-pixel bits tightly.
    */
   /* NOTE(review): loop form — apparently one arm of a stripped #if/#else */
   for (qy = 0; qy < 2; ++qy)
      for (qx = 0; qx < 2; ++qx)
         for (iy = 0; iy < 2; ++iy)
            for (ix = 0; ix < 2; ++ix)
               masks[qy][qx][iy][ix] = mask & (1 << (qy*8+iy*4+qx*2+ix)) ? ~0 : 0;

   /* NOTE(review): unrolled form — apparently the other arm */
   masks[0][0][0][0] = mask & (1 << (0*8+0*4+0*2+0)) ? ~0 : 0;
   masks[0][0][0][1] = mask & (1 << (0*8+0*4+0*2+1)) ? ~0 : 0;
   masks[0][0][1][0] = mask & (1 << (0*8+1*4+0*2+0)) ? ~0 : 0;
   masks[0][0][1][1] = mask & (1 << (0*8+1*4+0*2+1)) ? ~0 : 0;
   masks[0][1][0][0] = mask & (1 << (0*8+0*4+1*2+0)) ? ~0 : 0;
   masks[0][1][0][1] = mask & (1 << (0*8+0*4+1*2+1)) ? ~0 : 0;
   masks[0][1][1][0] = mask & (1 << (0*8+1*4+1*2+0)) ? ~0 : 0;
   masks[0][1][1][1] = mask & (1 << (0*8+1*4+1*2+1)) ? ~0 : 0;

   masks[1][0][0][0] = mask & (1 << (1*8+0*4+0*2+0)) ? ~0 : 0;
   masks[1][0][0][1] = mask & (1 << (1*8+0*4+0*2+1)) ? ~0 : 0;
   masks[1][0][1][0] = mask & (1 << (1*8+1*4+0*2+0)) ? ~0 : 0;
   masks[1][0][1][1] = mask & (1 << (1*8+1*4+0*2+1)) ? ~0 : 0;
   masks[1][1][0][0] = mask & (1 << (1*8+0*4+1*2+0)) ? ~0 : 0;
   masks[1][1][0][1] = mask & (1 << (1*8+0*4+1*2+1)) ? ~0 : 0;
   masks[1][1][1][0] = mask & (1 << (1*8+1*4+1*2+0)) ? ~0 : 0;
   masks[1][1][1][1] = mask & (1 << (1*8+1*4+1*2+1)) ? ~0 : 0;

   assert((x % 2) == 0);
   assert((y % 2) == 0);

   /* offset of the 16x16 pixel block within the tile */
   block_offset = ((iy/4)*(16*16) + (ix/4)*16);

   /* color tile is 4 bytes/pixel */
   color = tile->color + 4 * block_offset;

   depth = tile->depth + block_offset;

   /* XXX: This will most likely fail on 32bit x86 without -mstackrealign */
   assert(lp_check_alignment(masks, 16));

   assert(lp_check_alignment(depth, 16));
   assert(lp_check_alignment(color, 16));
   assert(lp_check_alignment(state->jit_context.blend_color, 16));

   /* run the generated fragment shader over the block
    * NOTE(review): remaining argument lines were lost in extraction */
   state->jit_function( &state->jit_context,
   /* NOTE(review): the enclosing function header for this fragment was lost
    * in extraction. Judging by its use of rast->tile (rather than
    * rast->tasks[...].tile) it appears to be an older/debug variant of the
    * quad-shading code that paints covered pixels with 0xff instead of
    * running the shader. Kept verbatim — confirm against the upstream file
    * whether this was dead (#if 0) code. */
   struct lp_rast_tile *tile = &rast->tile;

   /* for each of the four 2x2 quads, write 0xff to every covered channel */
   for (q = 0; q < 4; ++q)
      for(iy = 0; iy < 2; ++iy)
         for(ix = 0; ix < 2; ++ix)
            if(masks[q] & (1 << (iy*2 + ix)))
               for (chan_index = 0; chan_index < NUM_CHANNELS; ++chan_index)
                  TILE_PIXEL(tile->color, x + q*2 + ix, y + iy, chan_index) = 0xff;
435 * Write the rasterizer's color tile to the framebuffer.
437 static void lp_rast_store_color( struct lp_rasterizer
*rast
,
438 unsigned thread_index
)
440 const unsigned x
= rast
->tasks
[thread_index
].x
;
441 const unsigned y
= rast
->tasks
[thread_index
].y
;
445 if (x
+ w
> rast
->width
)
446 w
-= x
+ w
- rast
->width
;
448 if (y
+ h
> rast
->height
)
449 h
-= y
+ h
- rast
->height
;
453 assert(w
<= TILE_SIZE
);
454 assert(h
<= TILE_SIZE
);
456 LP_DBG(DEBUG_RAST
, "%s [%u] %d,%d %dx%d\n", __FUNCTION__
,
457 thread_index
, x
, y
, w
, h
);
459 lp_tile_write_4ub(rast
->cbuf_transfer
->format
,
460 rast
->tasks
[thread_index
].tile
.color
,
462 rast
->cbuf_transfer
->stride
,
/**
 * Write a rectangle of 32-bit depth values into a Z32 surface.
 * \param src         tightly packed source values (w*h uint32's)
 * \param dst         start of the destination surface, in bytes
 * \param dst_stride  destination row stride, in bytes
 * \param x0,y0       destination position, in pixels
 * \param w,h         rectangle size, in pixels
 */
static void
lp_tile_write_z32(const uint32_t *src, uint8_t *dst, unsigned dst_stride,
                  unsigned x0, unsigned y0, unsigned w, unsigned h)
{
   uint8_t *dst_row = dst + y0*dst_stride;
   unsigned x, y;

   for (y = 0; y < h; ++y) {
      /* each pixel is 4 bytes wide in the destination */
      uint32_t *dst_pixel = (uint32_t *)(dst_row + x0*4);
      for (x = 0; x < w; ++x) {
         *dst_pixel++ = *src++;
      }
      dst_row += dst_stride;
   }
}
484 * Write the rasterizer's z/stencil tile to the framebuffer.
486 static void lp_rast_store_zstencil( struct lp_rasterizer
*rast
,
487 unsigned thread_index
)
489 const unsigned x
= rast
->tasks
[thread_index
].x
;
490 const unsigned y
= rast
->tasks
[thread_index
].y
;
491 unsigned w
= TILE_SIZE
;
492 unsigned h
= TILE_SIZE
;
494 if (x
+ w
> rast
->width
)
495 w
-= x
+ w
- rast
->width
;
497 if (y
+ h
> rast
->height
)
498 h
-= y
+ h
- rast
->height
;
500 LP_DBG(DEBUG_RAST
, "%s %d,%d %dx%d\n", __FUNCTION__
, x
, y
, w
, h
);
502 assert(rast
->zsbuf_transfer
->format
== PIPE_FORMAT_Z32_UNORM
);
503 lp_tile_write_z32(rast
->tasks
[thread_index
].tile
.depth
,
505 rast
->zsbuf_transfer
->stride
,
511 * Write the rasterizer's tiles to the framebuffer.
514 lp_rast_end_tile( struct lp_rasterizer
*rast
,
515 unsigned thread_index
)
517 LP_DBG(DEBUG_RAST
, "%s\n", __FUNCTION__
);
519 if (rast
->state
.write_color
)
520 lp_rast_store_color(rast
, thread_index
);
522 if (rast
->state
.write_zstencil
)
523 lp_rast_store_zstencil(rast
, thread_index
);
528 * Rasterize commands for a single bin.
529 * \param x, y position of the bin's tile in the framebuffer
530 * Must be called between lp_rast_begin() and lp_rast_end().
534 rasterize_bin( struct lp_rasterizer
*rast
,
535 unsigned thread_index
,
536 const struct cmd_bin
*bin
,
539 const struct cmd_block_list
*commands
= &bin
->commands
;
540 struct cmd_block
*block
;
543 lp_rast_start_tile( rast
, thread_index
, x
, y
);
545 /* simply execute each of the commands in the block list */
546 for (block
= commands
->head
; block
; block
= block
->next
) {
547 for (k
= 0; k
< block
->count
; k
++) {
548 block
->cmd
[k
]( rast
, thread_index
, block
->arg
[k
] );
552 lp_rast_end_tile( rast
, thread_index
);
557 * Rasterize/execute all bins.
561 rasterize_bins( struct lp_rasterizer
*rast
,
562 unsigned thread_index
,
563 struct lp_bins
*bins
,
566 /* loop over tile bins, rasterize each */
570 for (i
= 0; i
< bins
->tiles_x
; i
++) {
571 for (j
= 0; j
< bins
->tiles_y
; j
++) {
572 struct cmd_bin
*bin
= lp_get_bin(bins
, i
, j
);
573 rasterize_bin( rast
, thread_index
,
574 bin
, i
* TILE_SIZE
, j
* TILE_SIZE
);
584 while ((bin
= lp_bin_iter_next(bins
, &x
, &y
))) {
585 rasterize_bin( rast
, thread_index
, bin
, x
* TILE_SIZE
, y
* TILE_SIZE
);
593 * Called by setup module when it has something for us to render.
596 lp_rasterize_bins( struct lp_rasterizer
*rast
,
597 struct lp_bins
*bins
,
598 const struct pipe_framebuffer_state
*fb
,
601 boolean debug
= false;
603 LP_DBG(DEBUG_SETUP
, "%s\n", __FUNCTION__
);
607 printf("rasterize bins:\n");
608 printf(" data size: %u\n", lp_bin_data_size(bins
));
609 for (y
= 0; y
< bins
->tiles_y
; y
++) {
610 for (x
= 0; x
< bins
->tiles_x
; x
++) {
611 printf(" bin %u, %u size: %u\n", x
, y
,
612 lp_bin_cmd_size(bins
, x
, y
));
620 fb
->cbufs
[0] != NULL
,
621 fb
->zsbuf
!= NULL
&& write_depth
,
625 if (rast
->num_threads
== 0) {
627 lp_bin_iter_begin( bins
);
628 rasterize_bins( rast
, 0, bins
, write_depth
);
630 /* reset bins and put into the empty queue */
631 lp_reset_bins( bins
);
632 lp_bins_enqueue( rast
->empty_bins
, bins
);
635 /* threaded rendering! */
638 lp_bins_enqueue( rast
->full_bins
, bins
);
640 /* XXX need to move/fix these */
642 rast
->write_depth
= write_depth
;
644 /*lp_bin_iter_begin( bins );*/
646 /* signal the threads that there's work to do */
647 for (i
= 0; i
< rast
->num_threads
; i
++) {
648 pipe_semaphore_signal(&rast
->tasks
[i
].work_ready
);
651 /* wait for work to complete */
652 for (i
= 0; i
< rast
->num_threads
; i
++) {
653 pipe_semaphore_wait(&rast
->tasks
[i
].work_done
);
659 LP_DBG(DEBUG_SETUP
, "%s done \n", __FUNCTION__
);
664 * This is the thread's main entrypoint.
665 * It's a simple loop:
668 * 3. signal that we're done
671 thread_func( void *init_data
)
673 struct lp_rasterizer_task
*task
= (struct lp_rasterizer_task
*) init_data
;
674 struct lp_rasterizer
*rast
= task
->rast
;
675 boolean debug
= false;
678 struct lp_bins
*bins
;
682 debug_printf("thread %d waiting for work\n", task
->thread_index
);
683 pipe_semaphore_wait(&task
->work_ready
);
685 bins
= get_next_full_bin( rast
);
690 debug_printf("thread %d doing work\n", task
->thread_index
);
691 rasterize_bins(rast
, task
->thread_index
,
692 bins
, rast
->write_depth
);
694 release_current_bin( rast
);
696 /* signal done with work */
698 debug_printf("thread %d done working\n", task
->thread_index
);
699 pipe_semaphore_signal(&task
->work_done
);
707 * Initialize semaphores and spawn the threads.
710 create_rast_threads(struct lp_rasterizer
*rast
)
714 rast
->num_threads
= util_cpu_caps
.nr_cpus
;
715 rast
->num_threads
= debug_get_num_option("LP_NUM_THREADS", rast
->num_threads
);
716 rast
->num_threads
= MIN2(rast
->num_threads
, MAX_THREADS
);
718 /* NOTE: if num_threads is zero, we won't use any threads */
719 for (i
= 0; i
< rast
->num_threads
; i
++) {
720 pipe_semaphore_init(&rast
->tasks
[i
].work_ready
, 0);
721 pipe_semaphore_init(&rast
->tasks
[i
].work_done
, 0);
722 rast
->threads
[i
] = pipe_thread_create(thread_func
,
723 (void *) &rast
->tasks
[i
]);
730 * Create new lp_rasterizer.
731 * \param empty the queue to put empty bins on after we've finished
734 struct lp_rasterizer
*
735 lp_rast_create( struct pipe_screen
*screen
, struct lp_bins_queue
*empty
)
737 struct lp_rasterizer
*rast
;
740 rast
= CALLOC_STRUCT(lp_rasterizer
);
744 rast
->screen
= screen
;
746 rast
->empty_bins
= empty
;
747 rast
->full_bins
= lp_bins_queue_create();
749 for (i
= 0; i
< Elements(rast
->tasks
); i
++) {
750 rast
->tasks
[i
].tile
.color
= align_malloc( TILE_SIZE
*TILE_SIZE
*4, 16 );
751 rast
->tasks
[i
].tile
.depth
= align_malloc( TILE_SIZE
*TILE_SIZE
*4, 16 );
752 rast
->tasks
[i
].rast
= rast
;
753 rast
->tasks
[i
].thread_index
= i
;
756 create_rast_threads(rast
);
764 void lp_rast_destroy( struct lp_rasterizer
*rast
)
768 pipe_surface_reference(&rast
->state
.cbuf
, NULL
);
769 pipe_surface_reference(&rast
->state
.zsbuf
, NULL
);
771 for (i
= 0; i
< Elements(rast
->tasks
); i
++) {
772 align_free(rast
->tasks
[i
].tile
.depth
);
773 align_free(rast
->tasks
[i
].tile
.color
);