1 /**************************************************************************
3 * Copyright 2007-2010 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 * Rasterization for binned triangles within a tile
35 * Prototype for an 8 plane rasterizer function. Will codegenerate
38 * XXX: Variants for more/fewer planes.
39 * XXX: Need ways of dropping planes as we descend.
// do_block_4: rasterize one 4x4 pixel block of a binned triangle.
// For each of the NR_PLANES edge functions it clears the coverage-mask
// bits of pixels (or per-sample bits in the 64-bit variant) that fall
// outside that edge, then hands the surviving mask to the fragment
// shader via lp_rast_shade_quads_mask_sample().
//
// NOTE(review): this extraction has dropped source lines (trailing
// parameters, braces, and the preprocessor conditionals that select
// between the 32-bit and 64-bit mask variants).  Code lines below are
// left byte-identical; only comments were added.  Confirm structure
// against the upstream file before relying on it.
43 TAG(do_block_4
)(struct lp_rasterizer_task
*task
,
44 const struct lp_rast_triangle
*tri
,
45 const struct lp_rast_plane
*plane
,
// Single-sample variant: one coverage bit per pixel, all 16 assumed in.
51 unsigned mask
= 0xffff;
// Multisample variant: 16 bits per sample slot packed into 64 bits.
// The #if/#else choosing between the two declarations is not visible
// here -- TODO confirm.
53 uint64_t mask
= UINT64_MAX
;
// Clip the coverage mask against every active plane.
56 for (j
= 0; j
< NR_PLANES
; j
++) {
// Pre-shift the (presumably 64-bit) c[j] and plane deltas down by
// FIXED_ORDER so BUILD_MASK_LINEAR can step in narrower precision.
59 mask
&= ~BUILD_MASK_LINEAR(((c
[j
] - 1) >> (int64_t)FIXED_ORDER
),
60 -plane
[j
].dcdx
>> FIXED_ORDER
,
61 plane
[j
].dcdy
>> FIXED_ORDER
);
// Alternate full-precision form of the same mask update; the
// surrounding conditional-compilation markers were lost in extraction.
63 mask
&= ~BUILD_MASK_LINEAR((c
[j
] - 1),
// Per-sample loop: re-evaluate the edge function at each of the 4
// sample offsets stored in task->scene->fixed_sample_pos[s][0..1].
68 for (unsigned s
= 0; s
< 4; s
++) {
69 int64_t new_c
= (c
[j
]) + ((IMUL64(task
->scene
->fixed_sample_pos
[s
][1], plane
[j
].dcdy
) + IMUL64(task
->scene
->fixed_sample_pos
[s
][0], -plane
[j
].dcdx
)) >> FIXED_ORDER
);
// Build the 16-bit outside-mask for this sample position.
72 build_mask
= BUILD_MASK_LINEAR((int32_t)((new_c
- 1) >> (int64_t)FIXED_ORDER
),
73 -plane
[j
].dcdx
>> FIXED_ORDER
,
74 plane
[j
].dcdy
>> FIXED_ORDER
);
// Alternate (unshifted) form of the same computation.
76 build_mask
= BUILD_MASK_LINEAR((new_c
- 1),
// Clear this sample's 16-bit lane in the 64-bit mask.
80 mask
&= ~((uint64_t)build_mask
<< (s
* 16));
85 /* Now pass to the shader:
88 lp_rast_shade_quads_mask_sample(task
, &tri
->inputs
, x
, y
, mask
);
// do_block_16: classify a 16x16 pixel region into its sixteen 4x4
// sub-blocks -- trivially rejected, trivially accepted (fully covered),
// or partially covered -- then recurse into do_block_4 for partials and
// call block_full_4 for fully covered sub-blocks.
//
// NOTE(review): extraction has dropped lines here (parameters, braces,
// preprocessor branches, cx[j] initializer head).  Code left
// byte-identical; only comments added.
92 * Evaluate a 16x16 block of pixels to determine which 4x4 subblocks are in/out
93 * of the triangle's bounds.
96 TAG(do_block_16
)(struct lp_rasterizer_task
*task
,
97 const struct lp_rast_triangle
*tri
,
98 const struct lp_rast_plane
*plane
,
102 unsigned outmask
, inmask
, partmask
, partial_mask
;
105 outmask
= 0; /* outside one or more trivial reject planes */
106 partmask
= 0; /* outside one or more trivial accept planes */
// Accumulate reject/accept sign bits per plane.
108 for (j
= 0; j
< NR_PLANES
; j
++) {
// 32-bit path: deltas pre-shifted by FIXED_ORDER (their low bits are
// zero by construction, per the comment block in lp_rast_triangle).
110 int32_t dcdx
= -plane
[j
].dcdx
>> FIXED_ORDER
;
111 int32_t dcdy
= plane
[j
].dcdy
>> FIXED_ORDER
;
112 const int32_t cox
= plane
[j
].eo
>> FIXED_ORDER
;
// << 2 scales per-pixel steps to per-4x4-sub-block steps.
113 const int32_t ei
= (dcdy
+ dcdx
- cox
) << 2;
114 const int32_t cox_s
= cox
<< 2;
115 const int32_t co
= (int32_t)(c
[j
] >> (int64_t)FIXED_ORDER
) + cox_s
;
// cdiff fixup: accounts for the "- 1" adjustment lost when shifting
// c down by FIXED_ORDER (see the long comment in lp_rast_triangle).
117 cdiff
= ei
- cox_s
+ ((int32_t)((c
[j
] - 1) >> (int64_t)FIXED_ORDER
) -
118 (int32_t)(c
[j
] >> (int64_t)FIXED_ORDER
));
// Alternate 64-bit path (presumably the RASTER_64 counterpart --
// TODO confirm); the selecting preprocessor lines are missing.
122 const int64_t dcdx
= -IMUL64(plane
[j
].dcdx
, 4);
123 const int64_t dcdy
= IMUL64(plane
[j
].dcdy
, 4);
124 const int64_t cox
= IMUL64(plane
[j
].eo
, 4);
125 const int32_t ei
= plane
[j
].dcdy
- plane
[j
].dcdx
- (int64_t)plane
[j
].eo
;
126 const int64_t cio
= IMUL64(ei
, 4) - 1;
// Gather sign bits for all 16 sub-blocks at once.
132 BUILD_MASKS(co
, cdiff
,
134 &outmask
, /* sign bits from c[i][0..15] + cox */
135 &partmask
); /* sign bits from c[i][0..15] + cio */
// All sixteen sub-blocks trivially rejected: nothing to draw.
138 if (outmask
== 0xffff)
141 /* Mask of sub-blocks which are inside all trivial accept planes:
143 inmask
= ~partmask
& 0xffff;
145 /* Mask of sub-blocks which are inside all trivial reject planes,
146 * but outside at least one trivial accept plane:
148 partial_mask
= partmask
& ~outmask
;
150 assert((partial_mask
& inmask
) == 0);
152 LP_COUNT_ADD(nr_empty_4
, util_bitcount(0xffff & ~(partial_mask
| inmask
)));
154 /* Iterate over partials:
156 while (partial_mask
) {
// Pick the lowest set bit; i indexes a 4x4 sub-block in row-major
// order, so ix/iy are its pixel offsets within the 16x16 region.
157 int i
= ffs(partial_mask
) - 1;
158 int ix
= (i
& 3) * 4;
159 int iy
= (i
>> 2) * 4;
162 int64_t cx
[NR_PLANES
];
164 partial_mask
&= ~(1 << i
);
166 LP_COUNT(nr_partially_covered_4
);
// Translate each plane's edge-function value to the sub-block origin.
// NOTE(review): the head of the cx[j] assignment was dropped by the
// extraction; only the offset terms survive below.
168 for (j
= 0; j
< NR_PLANES
; j
++)
170 - IMUL64(plane
[j
].dcdx
, ix
)
171 + IMUL64(plane
[j
].dcdy
, iy
));
173 TAG(do_block_4
)(task
, tri
, plane
, px
, py
, cx
);
176 /* Iterate over fulls:
179 int i
= ffs(inmask
) - 1;
180 int ix
= (i
& 3) * 4;
181 int iy
= (i
>> 2) * 4;
187 LP_COUNT(nr_fully_covered_4
);
188 block_full_4(task
, tri
, px
, py
);
// lp_rast_triangle: top-level per-tile rasterization entry point for a
// binned triangle.  Unpacks the triangle and active-plane mask from the
// command argument, translates each plane's edge function to the tile
// origin, classifies the tile's sixteen 16x16 blocks as reject /
// accept / partial, then dispatches do_block_16 for partials and
// block_full_16 for fully covered blocks.
//
// NOTE(review): extraction has dropped lines (loop headers around the
// plane-unpack, braces, preprocessor branches).  Code left
// byte-identical; only comments added.
194 * Scan the tile in chunks and figure out which pixels to rasterize
198 TAG(lp_rast_triangle
)(struct lp_rasterizer_task
*task
,
199 const union lp_rast_cmd_arg arg
)
201 const struct lp_rast_triangle
*tri
= arg
.triangle
.tri
;
202 unsigned plane_mask
= arg
.triangle
.plane_mask
;
203 const struct lp_rast_plane
*tri_plane
= GET_PLANES(tri
);
// Tile origin in pixels.
204 const int x
= task
->x
, y
= task
->y
;
// Local compacted copies of only the active planes.
205 struct lp_rast_plane plane
[NR_PLANES
];
206 int64_t c
[NR_PLANES
];
207 unsigned outmask
, inmask
, partmask
, partial_mask
;
// Early-out for triangles disabled after partial binning.
210 if (tri
->inputs
.disable
) {
211 /* This triangle was partially binned and has been disabled */
215 outmask
= 0; /* outside one or more trivial reject planes */
216 partmask
= 0; /* outside one or more trivial accept planes */
// Unpack the next active plane (lowest set bit of plane_mask) and
// evaluate its edge function c at the tile origin.
219 int i
= ffs(plane_mask
) - 1;
220 plane
[j
] = tri_plane
[i
];
221 plane_mask
&= ~(1 << i
);
222 c
[j
] = plane
[j
].c
+ IMUL64(plane
[j
].dcdy
, y
) - IMUL64(plane
[j
].dcdx
, x
);
227 * Strip off lower FIXED_ORDER bits. Note that those bits from
228 * dcdx, dcdy, eo are always 0 (by definition).
229 * c values, however, are not. This means that for every
230 * addition of the form c + n*dcdx the lower FIXED_ORDER bits will
231 * NOT change. And those bits are not relevant to the sign bit (which
232 * is only what we need!) that is,
233 * sign(c + n*dcdx) == sign((c >> FIXED_ORDER) + n*(dcdx >> FIXED_ORDER))
234 * This means we can get away with using 32bit math for the most part.
235 * Only tricky part is the -1 adjustment for cdiff.
237 int32_t dcdx
= -plane
[j
].dcdx
>> FIXED_ORDER
;
238 int32_t dcdy
= plane
[j
].dcdy
>> FIXED_ORDER
;
239 const int32_t cox
= plane
[j
].eo
>> FIXED_ORDER
;
// << 4 scales the per-pixel steps up to per-16x16-block steps.
240 const int32_t ei
= (dcdy
+ dcdx
- cox
) << 4;
241 const int32_t cox_s
= cox
<< 4;
242 const int32_t co
= (int32_t)(c
[j
] >> (int64_t)FIXED_ORDER
) + cox_s
;
245 * Plausibility check to ensure the 32bit math works.
246 * Note that within a tile, the max we can move the edge function
247 * is essentially dcdx * TILE_SIZE + dcdy * TILE_SIZE.
248 * TILE_SIZE is 64, dcdx/dcdy are nominally 21 bit (for 8192 max size
249 * and 8 subpixel bits), I'd be happy with 2 bits more too (1 for
250 * increasing fb size to 16384, the required d3d11 value, another one
251 * because I'm not quite sure we can't be _just_ above the max value
252 * here). This gives us 30 bits max - hence if c would exceed that here
253 * that means the plane is either trivial reject for the whole tile
254 * (in which case the tri will not get binned), or trivial accept for
255 * the whole tile (in which case plane_mask will not include it).
257 assert((c
[j
] >> (int64_t)FIXED_ORDER
) > (int32_t)0xb0000000 &&
258 (c
[j
] >> (int64_t)FIXED_ORDER
) < (int32_t)0x3fffffff);
260 * Note the fixup part is constant throughout the tile - thus could
261 * just calculate this and avoid _all_ 64bit math in rasterization
262 * (except exactly this fixup calc).
263 * In fact theoretically could move that even to setup, albeit that
264 * seems tricky (pre-bin certainly can have values larger than 32bit,
265 * and would need to communicate that fixup value through).
266 * And if we want to support msaa, we'd probably don't want to do the
267 * downscaling in setup in any case...
269 cdiff
= ei
- cox_s
+ ((int32_t)((c
[j
] - 1) >> (int64_t)FIXED_ORDER
) -
270 (int32_t)(c
[j
] >> (int64_t)FIXED_ORDER
));
// Alternate 32-bit path without the FIXED_ORDER downscale
// (presumably the non-RASTER_64 branch -- TODO confirm); the
// selecting preprocessor lines were dropped by the extraction.
274 const int32_t dcdx
= -plane
[j
].dcdx
<< 4;
275 const int32_t dcdy
= plane
[j
].dcdy
<< 4;
276 const int32_t cox
= plane
[j
].eo
<< 4;
277 const int32_t ei
= plane
[j
].dcdy
- plane
[j
].dcdx
- (int32_t)plane
[j
].eo
;
278 const int32_t cio
= (ei
<< 4) - 1;
// Gather reject/accept sign bits for all sixteen 16x16 blocks.
283 BUILD_MASKS(co
, cdiff
,
285 &outmask
, /* sign bits from c[i][0..15] + cox */
286 &partmask
); /* sign bits from c[i][0..15] + cio */
// Whole tile trivially rejected by some plane: done.
292 if (outmask
== 0xffff)
295 /* Mask of sub-blocks which are inside all trivial accept planes:
297 inmask
= ~partmask
& 0xffff;
299 /* Mask of sub-blocks which are inside all trivial reject planes,
300 * but outside at least one trivial accept plane:
302 partial_mask
= partmask
& ~outmask
;
304 assert((partial_mask
& inmask
) == 0);
306 LP_COUNT_ADD(nr_empty_16
, util_bitcount(0xffff & ~(partial_mask
| inmask
)));
308 /* Iterate over partials:
310 while (partial_mask
) {
// i indexes a 16x16 block in row-major order within the 64x64 tile.
311 int i
= ffs(partial_mask
) - 1;
312 int ix
= (i
& 3) * 16;
313 int iy
= (i
>> 2) * 16;
316 int64_t cx
[NR_PLANES
];
// Translate edge functions to the block origin.  NOTE(review): the
// head of the cx[j] assignment was dropped by the extraction.
318 for (j
= 0; j
< NR_PLANES
; j
++)
320 - IMUL64(plane
[j
].dcdx
, ix
)
321 + IMUL64(plane
[j
].dcdy
, iy
));
323 partial_mask
&= ~(1 << i
);
325 LP_COUNT(nr_partially_covered_16
);
326 TAG(do_block_16
)(task
, tri
, plane
, px
, py
, cx
);
329 /* Iterate over fulls:
332 int i
= ffs(inmask
) - 1;
333 int ix
= (i
& 3) * 16;
334 int iy
= (i
>> 2) * 16;
340 LP_COUNT(nr_fully_covered_16
);
341 block_full_16(task
, tri
, px
, py
);
// TRI_16 (SSE path): rasterize a triangle known to fit a 16x16 region.
// Precomputes per-plane SSE step tables (cstep4), trivially rejects
// 4x4 sub-blocks via eo-based sign tests, then computes exact per-pixel
// coverage for the partially covered sub-blocks and shades them.
//
// NOTE(review): extraction dropped lines here (the y extraction from
// the packed coordinate, loop/brace structure, part of the cx
// expression).  Code left byte-identical; only comments added.
345 #if defined(PIPE_ARCH_SSE) && defined(TRI_16)
346 /* XXX: special case this when intersection is not required.
347 * - tile completely within bbox,
348 * - bbox completely within tile.
351 TRI_16(struct lp_rasterizer_task
*task
,
352 const union lp_rast_cmd_arg arg
)
354 const struct lp_rast_triangle
*tri
= arg
.triangle
.tri
;
355 const struct lp_rast_plane
*plane
= GET_PLANES(tri
);
// For TRI_16 the plane_mask field carries packed block coordinates
// rather than a plane mask (low byte = x) -- see the x extraction.
356 unsigned mask
= arg
.triangle
.plane_mask
;
357 unsigned outmask
, partial_mask
;
// Per-plane table of edge-function offsets for a 4x4 pixel footprint.
359 __m128i cstep4
[NR_PLANES
][4];
361 int x
= (mask
& 0xff);
364 outmask
= 0; /* outside one or more trivial reject planes */
369 for (j
= 0; j
< NR_PLANES
; j
++) {
// Steps are scaled by 4: each lane advances one 4-pixel column/row.
370 const int dcdx
= -plane
[j
].dcdx
* 4;
371 const int dcdy
= plane
[j
].dcdy
* 4;
372 __m128i xdcdy
= _mm_set1_epi32(dcdy
);
// Row 0 holds x-offsets 0..3; each later row adds one dcdy step.
374 cstep4
[j
][0] = _mm_setr_epi32(0, dcdx
, dcdx
*2, dcdx
*3);
375 cstep4
[j
][1] = _mm_add_epi32(cstep4
[j
][0], xdcdy
);
376 cstep4
[j
][2] = _mm_add_epi32(cstep4
[j
][1], xdcdy
);
377 cstep4
[j
][3] = _mm_add_epi32(cstep4
[j
][2], xdcdy
);
// Edge function at the region origin, plus the trivial-reject offset.
380 const int c
= plane
[j
].c
+ plane
[j
].dcdy
* y
- plane
[j
].dcdx
* x
;
381 const int cox
= plane
[j
].eo
* 4;
// One sign bit per 4x4 sub-block: set = trivially rejected.
383 outmask
|= sign_bits4(cstep4
[j
], c
+ cox
);
387 if (outmask
== 0xffff)
391 /* Mask of sub-blocks which are inside all trivial reject planes,
392 * but outside at least one trivial accept plane:
394 partial_mask
= 0xffff & ~outmask
;
396 /* Iterate over partials:
398 while (partial_mask
) {
399 int i
= ffs(partial_mask
) - 1;
400 int ix
= (i
& 3) * 4;
401 int iy
= (i
>> 2) * 4;
// Per-pixel coverage mask for this 4x4 sub-block (shadows outer
// 'mask' in the original source).
404 unsigned mask
= 0xffff;
406 partial_mask
&= ~(1 << i
);
// Exact per-pixel test: clear pixels outside each plane.
// NOTE(review): part of the cx expression (the dcdx*px term) appears
// to have been dropped by the extraction.
408 for (j
= 0; j
< NR_PLANES
; j
++) {
409 const int cx
= (plane
[j
].c
- 1
411 + plane
[j
].dcdy
* py
) * 4;
413 mask
&= ~sign_bits4(cstep4
[j
], cx
);
417 lp_rast_shade_quads_mask(task
, &tri
->inputs
, px
, py
, mask
);
// TRI_4 (SSE path): rasterize a triangle known to fit a single 4x4
// block.  Evaluates each plane's edge function at all 16 pixels with
// SSE, packs the four row vectors down to bytes, and extracts the sign
// bits with _mm_movemask_epi8 to clear outside pixels from the mask.
//
// NOTE(review): extraction dropped lines here (braces, part of the cx
// expression, comment terminators).  Code left byte-identical; only
// comments added.
422 #if defined(PIPE_ARCH_SSE) && defined(TRI_4)
424 TRI_4(struct lp_rasterizer_task
*task
,
425 const union lp_rast_cmd_arg arg
)
427 const struct lp_rast_triangle
*tri
= arg
.triangle
.tri
;
428 const struct lp_rast_plane
*plane
= GET_PLANES(tri
);
// plane_mask here carries packed block coordinates: low byte = x
// offset, high bits = y offset (see the two extractions below).
429 unsigned mask
= arg
.triangle
.plane_mask
;
430 const int x
= task
->x
+ (mask
& 0xff);
431 const int y
= task
->y
+ (mask
>> 8);
434 /* Iterate over partials:
// Per-pixel coverage mask, all 16 pixels assumed covered initially.
437 unsigned mask
= 0xffff;
439 for (j
= 0; j
< NR_PLANES
; j
++) {
// Edge function at the block origin.  NOTE(review): the dcdx*x term
// appears to have been dropped by the extraction.
440 const int cx
= (plane
[j
].c
442 + plane
[j
].dcdy
* y
);
444 const int dcdx
= -plane
[j
].dcdx
;
445 const int dcdy
= plane
[j
].dcdy
;
446 __m128i xdcdy
= _mm_set1_epi32(dcdy
);
// One vector per pixel row; lanes step by dcdx across the row.
448 __m128i cstep0
= _mm_setr_epi32(cx
, cx
+ dcdx
, cx
+ dcdx
*2, cx
+ dcdx
*3);
449 __m128i cstep1
= _mm_add_epi32(cstep0
, xdcdy
);
450 __m128i cstep2
= _mm_add_epi32(cstep1
, xdcdy
);
451 __m128i cstep3
= _mm_add_epi32(cstep2
, xdcdy
);
// Saturating packs preserve each value's sign while narrowing
// 16 x int32 -> 16 x int8, one byte per pixel.
453 __m128i cstep01
= _mm_packs_epi32(cstep0
, cstep1
);
454 __m128i cstep23
= _mm_packs_epi32(cstep2
, cstep3
);
455 __m128i result
= _mm_packs_epi16(cstep01
, cstep23
);
457 /* Extract the sign bits
459 mask
&= ~_mm_movemask_epi8(result
);
463 lp_rast_shade_quads_mask(task
, &tri
->inputs
, x
, y
, mask
);