/**************************************************************************
 *
 * Copyright 2007-2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Rasterization for binned triangles within a tile
 */

#include "util/u_math.h"
#include "lp_rast_priv.h"
#include "lp_tile_soa.h"

/**
 * Shade all pixels in a 4x4 block.
 */
static void
block_full_4(struct lp_rasterizer_task *task,
             const struct lp_rast_triangle *tri,
             int x, int y)
{
   lp_rast_shade_quads_all(task, &tri->inputs, x, y);
}

/**
 * Shade all pixels in a 16x16 block.
 */
static void
block_full_16(struct lp_rasterizer_task *task,
              const struct lp_rast_triangle *tri,
              int x, int y)
{
   unsigned ix, iy;

   assert(x % 16 == 0);
   assert(y % 16 == 0);

   for (iy = 0; iy < 16; iy += 4)
      for (ix = 0; ix < 16; ix += 4)
         block_full_4(task, tri, x + ix, y + iy);
}
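
/*
 * Note: these two helpers are the trivial-accept path.  The template
 * rasterizer generated from lp_rast_tri_tmp.h calls them for blocks that
 * lie entirely inside every edge, so all pixels are shaded without any
 * per-pixel coverage test.
 */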

#if !defined(PIPE_ARCH_SSE)

static INLINE unsigned
build_mask_linear(int c, int dcdx, int dcdy)
{
   unsigned mask = 0;

   int c0 = c;
   int c1 = c0 + dcdy;
   int c2 = c1 + dcdy;
   int c3 = c2 + dcdy;

   mask |= ((c0 + 0 * dcdx) >> 31) & (1 << 0);
   mask |= ((c0 + 1 * dcdx) >> 31) & (1 << 1);
   mask |= ((c0 + 2 * dcdx) >> 31) & (1 << 2);
   mask |= ((c0 + 3 * dcdx) >> 31) & (1 << 3);
   mask |= ((c1 + 0 * dcdx) >> 31) & (1 << 4);
   mask |= ((c1 + 1 * dcdx) >> 31) & (1 << 5);
   mask |= ((c1 + 2 * dcdx) >> 31) & (1 << 6);
   mask |= ((c1 + 3 * dcdx) >> 31) & (1 << 7);
   mask |= ((c2 + 0 * dcdx) >> 31) & (1 << 8);
   mask |= ((c2 + 1 * dcdx) >> 31) & (1 << 9);
   mask |= ((c2 + 2 * dcdx) >> 31) & (1 << 10);
   mask |= ((c2 + 3 * dcdx) >> 31) & (1 << 11);
   mask |= ((c3 + 0 * dcdx) >> 31) & (1 << 12);
   mask |= ((c3 + 1 * dcdx) >> 31) & (1 << 13);
   mask |= ((c3 + 2 * dcdx) >> 31) & (1 << 14);
   mask |= ((c3 + 3 * dcdx) >> 31) & (1 << 15);

   return mask;
}
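
/*
 * The mask construction above is branch-free: an edge-function value is
 * negative exactly when the pixel fails that plane's test, so the
 * arithmetic right shift by 31 broadcasts the sign bit and the AND keeps
 * a single bit of it.  Each step is equivalent to the plainer:
 *
 *    if (c1 + 2 * dcdx < 0)
 *       mask |= 1 << 6;
 */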

static INLINE void
build_masks(int c,
            int cdiff,
            int dcdx,
            int dcdy,
            unsigned *outmask,
            unsigned *partmask)
{
   *outmask |= build_mask_linear(c, dcdx, dcdy);
   *partmask |= build_mask_linear(c + cdiff, dcdx, dcdy);
}
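
/*
 * Sketch of the contract, as used by the template rasterizer (see
 * lp_rast_tri_tmp.h): each bit covers one 4x4 sub-block of a 16x16
 * region.  The caller passes c pre-offset to the trivial-reject corner,
 * so *outmask accumulates sub-blocks that are entirely outside a plane;
 * cdiff re-aims the same evaluation at the trivial-accept corner, so
 * *partmask accumulates sub-blocks that are not fully covered.  Blocks in
 * neither mask can be shaded without any per-pixel test.
 */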

void
lp_rast_triangle_3_16(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   union lp_rast_cmd_arg arg2;
   arg2.triangle.tri = arg.triangle.tri;
   arg2.triangle.plane_mask = (1<<3)-1;
   lp_rast_triangle_3(task, arg2);
}

void
lp_rast_triangle_4_16(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   union lp_rast_cmd_arg arg2;
   arg2.triangle.tri = arg.triangle.tri;
   arg2.triangle.plane_mask = (1<<4)-1;
   lp_rast_triangle_4(task, arg2);
}

void
lp_rast_triangle_3_4(struct lp_rasterizer_task *task,
                     const union lp_rast_cmd_arg arg)
{
   lp_rast_triangle_3_16(task, arg);
}
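
/*
 * Without SSE the specialized small-triangle entry points above simply
 * re-dispatch to the general plane-mask rasterizers with all planes
 * enabled; the hand-optimized single-block versions below are SSE-only.
 */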

#else

#include <emmintrin.h>
#include "util/u_sse.h"

static INLINE void
build_masks(int c,
            int cdiff,
            int dcdx,
            int dcdy,
            unsigned *outmask,
            unsigned *partmask)
{
   __m128i cstep0 = _mm_setr_epi32(c, c+dcdx, c+dcdx*2, c+dcdx*3);
   __m128i xdcdy = _mm_set1_epi32(dcdy);

   /* Get values across the quad
    */
   __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
   __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
   __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

   {
      __m128i cstep01, cstep23, result;

      cstep01 = _mm_packs_epi32(cstep0, cstep1);
      cstep23 = _mm_packs_epi32(cstep2, cstep3);
      result = _mm_packs_epi16(cstep01, cstep23);

      *outmask |= _mm_movemask_epi8(result);
   }

   {
      __m128i cio4 = _mm_set1_epi32(cdiff);
      __m128i cstep01, cstep23, result;

      cstep0 = _mm_add_epi32(cstep0, cio4);
      cstep1 = _mm_add_epi32(cstep1, cio4);
      cstep2 = _mm_add_epi32(cstep2, cio4);
      cstep3 = _mm_add_epi32(cstep3, cio4);

      cstep01 = _mm_packs_epi32(cstep0, cstep1);
      cstep23 = _mm_packs_epi32(cstep2, cstep3);
      result = _mm_packs_epi16(cstep01, cstep23);

      *partmask |= _mm_movemask_epi8(result);
   }
}
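
/*
 * Same outmask/partmask contract as the scalar build_masks() in the
 * !PIPE_ARCH_SSE branch: the first pack/movemask pass collects sign bits
 * at the trivial-reject corner, then cdiff shifts the already-computed
 * step values to the trivial-accept corner for the partial mask, reusing
 * the row stepping.
 */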

static INLINE unsigned
build_mask_linear(int c, int dcdx, int dcdy)
{
   __m128i cstep0 = _mm_setr_epi32(c, c+dcdx, c+dcdx*2, c+dcdx*3);
   __m128i xdcdy = _mm_set1_epi32(dcdy);

   /* Get values across the quad
    */
   __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
   __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
   __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

   /* pack pairs of results into epi16
    */
   __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
   __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);

   /* pack into epi8, preserving sign bits
    */
   __m128i result = _mm_packs_epi16(cstep01, cstep23);

   /* extract sign bits to create mask
    */
   return _mm_movemask_epi8(result);
}
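
/*
 * The packs are safe because _mm_packs_epi32/_mm_packs_epi16 saturate: a
 * negative 32-bit value can never narrow to a non-negative 16- or 8-bit
 * value, so every lane's sign survives both narrowing steps and a single
 * _mm_movemask_epi8() gathers all 16 sign bits at once.
 */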

static INLINE unsigned
sign_bits4(const __m128i *cstep, int cdiff)
{
   /* Adjust the step values
    */
   __m128i cio4 = _mm_set1_epi32(cdiff);
   __m128i cstep0 = _mm_add_epi32(cstep[0], cio4);
   __m128i cstep1 = _mm_add_epi32(cstep[1], cio4);
   __m128i cstep2 = _mm_add_epi32(cstep[2], cio4);
   __m128i cstep3 = _mm_add_epi32(cstep[3], cio4);

   /* Pack down to epi8
    */
   __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
   __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);
   __m128i result = _mm_packs_epi16(cstep01, cstep23);

   /* Extract the sign bits
    */
   return _mm_movemask_epi8(result);
}
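
/*
 * Fast path for a 3-plane triangle known to fit in a single 16x16 block.
 * Note that the _4 and _16 variants reuse the plane_mask field of the
 * command argument to carry the block's position within the tile (x
 * offset in the low byte, y offset in the next byte) rather than an
 * actual plane mask.  SCALAR_EPI32(v, i) broadcasts lane i of v across a
 * register; transpose4_epi32() and mm_mullo_epi32() are SSE helpers from
 * "util/u_sse.h".
 */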
void
lp_rast_triangle_3_16(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   int x = (arg.triangle.plane_mask & 0xff) + task->x;
   int y = (arg.triangle.plane_mask >> 8) + task->y;
   unsigned i, j;

   struct { unsigned mask:16; unsigned i:8; unsigned j:8; } out[16];
   unsigned nr = 0;

   __m128i p0 = _mm_load_si128((__m128i *)&plane[0]); /* c, dcdx, dcdy, eo */
   __m128i p1 = _mm_load_si128((__m128i *)&plane[1]); /* c, dcdx, dcdy, eo */
   __m128i p2 = _mm_load_si128((__m128i *)&plane[2]); /* c, dcdx, dcdy, eo */
   __m128i zero = _mm_setzero_si128();

   __m128i c;
   __m128i dcdx;
   __m128i dcdy;
   __m128i rej4;

   __m128i dcdx2;
   __m128i dcdx3;

   __m128i span_0;                /* 0,dcdx,2dcdx,3dcdx for plane 0 */
   __m128i span_1;                /* 0,dcdx,2dcdx,3dcdx for plane 1 */
   __m128i span_2;                /* 0,dcdx,2dcdx,3dcdx for plane 2 */
   __m128i unused;

   transpose4_epi32(&p0, &p1, &p2, &zero,
                    &c, &dcdx, &dcdy, &rej4);

   /* Adjust dcdx;
    */
   dcdx = _mm_sub_epi32(zero, dcdx);

   c = _mm_add_epi32(c, mm_mullo_epi32(dcdx, _mm_set1_epi32(x)));
   c = _mm_add_epi32(c, mm_mullo_epi32(dcdy, _mm_set1_epi32(y)));
   rej4 = _mm_slli_epi32(rej4, 2);

   dcdx2 = _mm_add_epi32(dcdx, dcdx);
   dcdx3 = _mm_add_epi32(dcdx2, dcdx);

   transpose4_epi32(&zero, &dcdx, &dcdx2, &dcdx3,
                    &span_0, &span_1, &span_2, &unused);

   for (i = 0; i < 4; i++) {
      __m128i cx = c;

      for (j = 0; j < 4; j++) {
         __m128i c4rej = _mm_add_epi32(cx, rej4);
         __m128i rej_masks = _mm_srai_epi32(c4rej, 31);

         /* if (is_zero(rej_masks)) */
         if (_mm_movemask_epi8(rej_masks) == 0) {
            __m128i c0_0 = _mm_add_epi32(SCALAR_EPI32(cx, 0), span_0);
            __m128i c1_0 = _mm_add_epi32(SCALAR_EPI32(cx, 1), span_1);
            __m128i c2_0 = _mm_add_epi32(SCALAR_EPI32(cx, 2), span_2);

            __m128i c_0 = _mm_or_si128(_mm_or_si128(c0_0, c1_0), c2_0);

            __m128i c0_1 = _mm_add_epi32(c0_0, SCALAR_EPI32(dcdy, 0));
            __m128i c1_1 = _mm_add_epi32(c1_0, SCALAR_EPI32(dcdy, 1));
            __m128i c2_1 = _mm_add_epi32(c2_0, SCALAR_EPI32(dcdy, 2));

            __m128i c_1 = _mm_or_si128(_mm_or_si128(c0_1, c1_1), c2_1);
            __m128i c_01 = _mm_packs_epi32(c_0, c_1);

            __m128i c0_2 = _mm_add_epi32(c0_1, SCALAR_EPI32(dcdy, 0));
            __m128i c1_2 = _mm_add_epi32(c1_1, SCALAR_EPI32(dcdy, 1));
            __m128i c2_2 = _mm_add_epi32(c2_1, SCALAR_EPI32(dcdy, 2));

            __m128i c_2 = _mm_or_si128(_mm_or_si128(c0_2, c1_2), c2_2);

            __m128i c0_3 = _mm_add_epi32(c0_2, SCALAR_EPI32(dcdy, 0));
            __m128i c1_3 = _mm_add_epi32(c1_2, SCALAR_EPI32(dcdy, 1));
            __m128i c2_3 = _mm_add_epi32(c2_2, SCALAR_EPI32(dcdy, 2));

            __m128i c_3 = _mm_or_si128(_mm_or_si128(c0_3, c1_3), c2_3);
            __m128i c_23 = _mm_packs_epi32(c_2, c_3);
            __m128i c_0123 = _mm_packs_epi16(c_01, c_23);

            unsigned mask = _mm_movemask_epi8(c_0123);

            out[nr].i = i;
            out[nr].j = j;
            out[nr].mask = mask;
            if (mask != 0xffff)
               nr++;
         }
         cx = _mm_add_epi32(cx, _mm_slli_epi32(dcdx, 2));
      }

      c = _mm_add_epi32(c, _mm_slli_epi32(dcdy, 2));
   }

   for (i = 0; i < nr; i++)
      lp_rast_shade_quads_mask(task,
                               &tri->inputs,
                               x + 4 * out[i].j,
                               y + 4 * out[i].i,
                               0xffff & ~out[i].mask);
}
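
/*
 * Implementation note: the loops above walk the 16 4x4 sub-blocks of the
 * 16x16 region.  A sub-block is trivially rejected when some plane's
 * value plus rej4 (the plane's trivial-reject corner offset, shifted left
 * by 2 to match the 4-pixel step) is still negative.  Surviving coverage
 * masks are queued in out[] and shaded afterwards, keeping shader
 * invocations out of the SIMD inner loop.
 */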

void
lp_rast_triangle_3_4(struct lp_rasterizer_task *task,
                     const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   int x = (arg.triangle.plane_mask & 0xff) + task->x;
   int y = (arg.triangle.plane_mask >> 8) + task->y;

   __m128i p0 = _mm_load_si128((__m128i *)&plane[0]); /* c, dcdx, dcdy, eo */
   __m128i p1 = _mm_load_si128((__m128i *)&plane[1]); /* c, dcdx, dcdy, eo */
   __m128i p2 = _mm_load_si128((__m128i *)&plane[2]); /* c, dcdx, dcdy, eo */
   __m128i zero = _mm_setzero_si128();

   __m128i c;
   __m128i dcdx;
   __m128i dcdy;

   __m128i dcdx2;
   __m128i dcdx3;

   __m128i span_0;                /* 0,dcdx,2dcdx,3dcdx for plane 0 */
   __m128i span_1;                /* 0,dcdx,2dcdx,3dcdx for plane 1 */
   __m128i span_2;                /* 0,dcdx,2dcdx,3dcdx for plane 2 */
   __m128i unused;

   transpose4_epi32(&p0, &p1, &p2, &zero,
                    &c, &dcdx, &dcdy, &unused);

   /* Adjust dcdx;
    */
   dcdx = _mm_sub_epi32(zero, dcdx);

   c = _mm_add_epi32(c, mm_mullo_epi32(dcdx, _mm_set1_epi32(x)));
   c = _mm_add_epi32(c, mm_mullo_epi32(dcdy, _mm_set1_epi32(y)));

   dcdx2 = _mm_add_epi32(dcdx, dcdx);
   dcdx3 = _mm_add_epi32(dcdx2, dcdx);

   transpose4_epi32(&zero, &dcdx, &dcdx2, &dcdx3,
                    &span_0, &span_1, &span_2, &unused);

   {
      __m128i c0_0 = _mm_add_epi32(SCALAR_EPI32(c, 0), span_0);
      __m128i c1_0 = _mm_add_epi32(SCALAR_EPI32(c, 1), span_1);
      __m128i c2_0 = _mm_add_epi32(SCALAR_EPI32(c, 2), span_2);

      __m128i c_0 = _mm_or_si128(_mm_or_si128(c0_0, c1_0), c2_0);

      __m128i c0_1 = _mm_add_epi32(c0_0, SCALAR_EPI32(dcdy, 0));
      __m128i c1_1 = _mm_add_epi32(c1_0, SCALAR_EPI32(dcdy, 1));
      __m128i c2_1 = _mm_add_epi32(c2_0, SCALAR_EPI32(dcdy, 2));

      __m128i c_1 = _mm_or_si128(_mm_or_si128(c0_1, c1_1), c2_1);
      __m128i c_01 = _mm_packs_epi32(c_0, c_1);

      __m128i c0_2 = _mm_add_epi32(c0_1, SCALAR_EPI32(dcdy, 0));
      __m128i c1_2 = _mm_add_epi32(c1_1, SCALAR_EPI32(dcdy, 1));
      __m128i c2_2 = _mm_add_epi32(c2_1, SCALAR_EPI32(dcdy, 2));

      __m128i c_2 = _mm_or_si128(_mm_or_si128(c0_2, c1_2), c2_2);

      __m128i c0_3 = _mm_add_epi32(c0_2, SCALAR_EPI32(dcdy, 0));
      __m128i c1_3 = _mm_add_epi32(c1_2, SCALAR_EPI32(dcdy, 1));
      __m128i c2_3 = _mm_add_epi32(c2_2, SCALAR_EPI32(dcdy, 2));

      __m128i c_3 = _mm_or_si128(_mm_or_si128(c0_3, c1_3), c2_3);
      __m128i c_23 = _mm_packs_epi32(c_2, c_3);
      __m128i c_0123 = _mm_packs_epi16(c_01, c_23);

      unsigned mask = _mm_movemask_epi8(c_0123);

      if (mask != 0xffff)
         lp_rast_shade_quads_mask(task,
                                  &tri->inputs,
                                  x, y,
                                  0xffff & ~mask);
   }
}

#endif
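
/*
 * Instantiate the template rasterizer once per plane count.  Each
 * inclusion of lp_rast_tri_tmp.h generates lp_rast_triangle_<N> for the
 * current TAG/NR_PLANES and #undefs the macros afterwards; TRI_4/TRI_16,
 * where defined, appear to route whole-block work to the hand-written
 * specializations above.
 */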

#define TAG(x) x##_1
#define NR_PLANES 1
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_2
#define NR_PLANES 2
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_3
#define NR_PLANES 3
/*#define TRI_4 lp_rast_triangle_3_4*/
/*#define TRI_16 lp_rast_triangle_3_16*/
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_4
#define NR_PLANES 4
#define TRI_16 lp_rast_triangle_4_16
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_5
#define NR_PLANES 5
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_6
#define NR_PLANES 6
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_7
#define NR_PLANES 7
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_8
#define NR_PLANES 8
#include "lp_rast_tri_tmp.h"