llvmpipe: intrinsics versions of build_mask functions
mesa.git: src/gallium/drivers/llvmpipe/lp_rast_tri.c
/**************************************************************************
 *
 * Copyright 2007-2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Rasterization for binned triangles within a tile
 */

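/*
 * Each triangle is described by per-edge linear functions (c, dcdx,
 * dcdy) together with trivial-accept (ei) and trivial-reject (eo)
 * corner offsets.  In the functions below, blocks are classified
 * against these planes with the build_mask helpers and then shaded
 * either whole (block_full_16 / block_full_4) or refined into smaller
 * blocks.
 */
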
#include <limits.h>
#include "util/u_math.h"
#include "lp_debug.h"
#include "lp_perf.h"
#include "lp_rast_priv.h"
#include "lp_tile_soa.h"


/**
 * Shade all pixels in a 4x4 block.
 */
static void
block_full_4(struct lp_rasterizer_task *task,
             const struct lp_rast_triangle *tri,
             int x, int y)
{
   lp_rast_shade_quads_all(task, &tri->inputs, x, y);
}


/**
 * Shade all pixels in a 16x16 block.
 */
static void
block_full_16(struct lp_rasterizer_task *task,
              const struct lp_rast_triangle *tri,
              int x, int y)
{
   unsigned ix, iy;
   assert(x % 16 == 0);
   assert(y % 16 == 0);
   for (iy = 0; iy < 16; iy += 4)
      for (ix = 0; ix < 16; ix += 4)
         block_full_4(task, tri, x + ix, y + iy);
}

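/* The two helpers below (in scalar and SSE variants) compute a 16-bit
 * coverage mask for a 4x4 grid: bit i is set when the interpolated
 * value c + x*dcdx + y*dcdy is negative (sign bit set) at the
 * corresponding position.  build_mask orders the bits to match the
 * 2x2 quad layout used by the fragment shader:
 *
 *     0  1 |  4  5
 *     2  3 |  6  7
 *    ------+------
 *     8  9 | 12 13
 *    10 11 | 14 15
 *
 * while build_mask_linear numbers them in row-major order
 * (bit = y*4 + x).
 */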
#if !defined(PIPE_ARCH_SSE)

/* Scalar variants.  The (value >> 31) idiom relies on an arithmetic
 * right shift smearing the sign bit across the word, yielding ~0 for
 * negative values and 0 otherwise.
 */
static INLINE unsigned
build_mask(int c, int dcdx, int dcdy)
{
   int mask = 0;

   int c0 = c;
   int c1 = c0 + dcdx;
   int c2 = c1 + dcdx;
   int c3 = c2 + dcdx;

   mask |= ((c0 + 0 * dcdy) >> 31) & (1 << 0);
   mask |= ((c0 + 1 * dcdy) >> 31) & (1 << 2);
   mask |= ((c0 + 2 * dcdy) >> 31) & (1 << 8);
   mask |= ((c0 + 3 * dcdy) >> 31) & (1 << 10);
   mask |= ((c1 + 0 * dcdy) >> 31) & (1 << 1);
   mask |= ((c1 + 1 * dcdy) >> 31) & (1 << 3);
   mask |= ((c1 + 2 * dcdy) >> 31) & (1 << 9);
   mask |= ((c1 + 3 * dcdy) >> 31) & (1 << 11);
   mask |= ((c2 + 0 * dcdy) >> 31) & (1 << 4);
   mask |= ((c2 + 1 * dcdy) >> 31) & (1 << 6);
   mask |= ((c2 + 2 * dcdy) >> 31) & (1 << 12);
   mask |= ((c2 + 3 * dcdy) >> 31) & (1 << 14);
   mask |= ((c3 + 0 * dcdy) >> 31) & (1 << 5);
   mask |= ((c3 + 1 * dcdy) >> 31) & (1 << 7);
   mask |= ((c3 + 2 * dcdy) >> 31) & (1 << 13);
   mask |= ((c3 + 3 * dcdy) >> 31) & (1 << 15);

   return mask;
}


static INLINE unsigned
build_mask_linear(int c, int dcdx, int dcdy)
{
   int mask = 0;

   int c0 = c;
   int c1 = c0 + dcdy;
   int c2 = c1 + dcdy;
   int c3 = c2 + dcdy;

   mask |= ((c0 + 0 * dcdx) >> 31) & (1 << 0);
   mask |= ((c0 + 1 * dcdx) >> 31) & (1 << 1);
   mask |= ((c0 + 2 * dcdx) >> 31) & (1 << 2);
   mask |= ((c0 + 3 * dcdx) >> 31) & (1 << 3);
   mask |= ((c1 + 0 * dcdx) >> 31) & (1 << 4);
   mask |= ((c1 + 1 * dcdx) >> 31) & (1 << 5);
   mask |= ((c1 + 2 * dcdx) >> 31) & (1 << 6);
   mask |= ((c1 + 3 * dcdx) >> 31) & (1 << 7);
   mask |= ((c2 + 0 * dcdx) >> 31) & (1 << 8);
   mask |= ((c2 + 1 * dcdx) >> 31) & (1 << 9);
   mask |= ((c2 + 2 * dcdx) >> 31) & (1 << 10);
   mask |= ((c2 + 3 * dcdx) >> 31) & (1 << 11);
   mask |= ((c3 + 0 * dcdx) >> 31) & (1 << 12);
   mask |= ((c3 + 1 * dcdx) >> 31) & (1 << 13);
   mask |= ((c3 + 2 * dcdx) >> 31) & (1 << 14);
   mask |= ((c3 + 3 * dcdx) >> 31) & (1 << 15);

   return mask;
}
#else
#include <emmintrin.h>
#include "util/u_sse.h"


static INLINE unsigned
build_mask_linear(int c, int dcdx, int dcdy)
{
   __m128i cstep0 = _mm_setr_epi32(c, c+dcdx, c+dcdx*2, c+dcdx*3);
   __m128i xdcdy = _mm_set1_epi32(dcdy);

   /* Get values across the quad
    */
   __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
   __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
   __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

   /* pack pairs of results into epi16.  The saturating pack preserves
    * the sign of each value, which is all we need.
    */
   __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
   __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);

   /* pack into epi8, again preserving sign bits
    */
   __m128i result = _mm_packs_epi16(cstep01, cstep23);

   /* extract the 16 sign bits to form the coverage mask
    */
   return _mm_movemask_epi8(result);
}
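/* Note the helper is agnostic to what the steps represent:
 * lp_rast_triangle_3_16 below passes per-4x4-block steps (dcdx, dcdy
 * scaled by 4) to classify sub-blocks rather than individual pixels.
 */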

static INLINE unsigned
build_mask(int c, int dcdx, int dcdy)
{
   __m128i step = _mm_setr_epi32(0, dcdx, dcdy, dcdx + dcdy);
   __m128i c0 = _mm_set1_epi32(c);

   /* Get values across the quad
    */
   __m128i cstep0 = _mm_add_epi32(c0, step);

   /* Scale up the step for moving between quads.  A left shift by one
    * multiplies by two (there is no arithmetic/logical distinction for
    * left shifts, so _mm_slli_epi32 is sufficient).  It's unlikely that
    * the step value is large enough to overflow across 4 pixels; if it
    * is that big, rendering will be incorrect anyway.
    */
   __m128i step4 = _mm_slli_epi32(step, 1);

   /* Get values for the remaining quads:
    */
   __m128i cstep1 = _mm_add_epi32(cstep0,
                                  _mm_shuffle_epi32(step4, _MM_SHUFFLE(1,1,1,1)));
   __m128i cstep2 = _mm_add_epi32(cstep0,
                                  _mm_shuffle_epi32(step4, _MM_SHUFFLE(2,2,2,2)));
   __m128i cstep3 = _mm_add_epi32(cstep2,
                                  _mm_shuffle_epi32(step4, _MM_SHUFFLE(1,1,1,1)));

   /* pack pairs of results into epi16, preserving signs via saturation
    */
   __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
   __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);

   /* pack into epi8, preserving sign bits
    */
   __m128i result = _mm_packs_epi16(cstep01, cstep23);

   /* extract sign bits to create mask
    */
   return _mm_movemask_epi8(result);
}

#endif
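/* Both variants must agree with the straightforward per-position
 * definition.  A minimal sanity check (a sketch only, not part of this
 * file's build) would be:
 *
 *    int ref = 0, i;
 *    for (i = 0; i < 16; i++) {
 *       if (c + (i & 3) * dcdx + (i >> 2) * dcdy < 0)
 *          ref |= 1 << i;
 *    }
 *    assert(ref == build_mask_linear(c, dcdx, dcdy));
 *
 * The SSE path matches this for all 32-bit inputs because the
 * saturating packs preserve the sign of each value, which is all that
 * _mm_movemask_epi8 reads.
 */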


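/* Instantiate the rasterization templates for triangles with 1..8
 * active planes.  Each inclusion of lp_rast_tri_tmp.h is expected to
 * generate a TAG()-suffixed family of functions (e.g. do_block_4_3,
 * used by lp_rast_triangle_3_16 below) specialized for NR_PLANES.
 */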
#define TAG(x) x##_1
#define NR_PLANES 1
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_2
#define NR_PLANES 2
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_3
#define NR_PLANES 3
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_4
#define NR_PLANES 4
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_5
#define NR_PLANES 5
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_6
#define NR_PLANES 6
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_7
#define NR_PLANES 7
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_8
#define NR_PLANES 8
#include "lp_rast_tri_tmp.h"


/* Special case for a 3 plane triangle which is contained entirely
 * within a 16x16 block.
 */
void
lp_rast_triangle_3_16(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = tri->plane;
   unsigned mask = arg.triangle.plane_mask;
   /* For this command the plane_mask field carries the position of the
    * 16x16 block within the tile rather than a plane mask.
    */
   const int x = task->x + (mask & 0xf) * 16;
   const int y = task->y + (mask >> 4) * 16;
   unsigned outmask, inmask, partmask, partial_mask;
   unsigned j;
   int c[3];

   outmask = 0;                /* outside one or more trivial reject planes */
   partmask = 0;               /* outside one or more trivial accept planes */

   for (j = 0; j < 3; j++) {
      c[j] = plane[j].c + plane[j].dcdy * y - plane[j].dcdx * x;

      {
         /* Steps between adjacent 4x4 blocks, and the plane values at
          * the trivial-reject (eo) and trivial-accept (ei) corners:
          */
         const int dcdx = -plane[j].dcdx * 4;
         const int dcdy = plane[j].dcdy * 4;
         const int cox = c[j] + plane[j].eo * 4;
         const int cio = c[j] + plane[j].ei * 4 - 1;

         outmask |= build_mask_linear(cox, dcdx, dcdy);
         partmask |= build_mask_linear(cio, dcdx, dcdy);
      }
   }

   if (outmask == 0xffff)
      return;

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;
      int cx[3];

      partial_mask &= ~(1 << i);

      for (j = 0; j < 3; j++)
         cx[j] = (c[j]
                  - plane[j].dcdx * ix
                  + plane[j].dcdy * iy);

      do_block_4_3(task, tri, plane, px, py, cx);
   }

   /* Iterate over fulls:
    */
   while (inmask) {
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;

      inmask &= ~(1 << i);

      block_full_4(task, tri, px, py);
   }
}