1 /****************************************************************************
2 * Copyright (C) 2014-2015 Intel Corporation. All Rights Reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * @brief Definitions for clipping
27 ******************************************************************************/
30 #include "common/simdintrin.h"
31 #include "core/context.h"
33 #include "rdtsc_core.h"
35 // Temp storage used by the clipper
36 extern THREAD simdvertex tlsTempVertices
[7];
//////////////////////////////////////////////////////////////////////////
/// @brief Clip codes for the view-frustum and guardband planes.
/// NOTE(review): the enum opener was garbled in this copy; the name
/// SWR_CLIPCODES is taken from its uses below (template<SWR_CLIPCODES ClippingPlane>).
enum SWR_CLIPCODES
{
    // Shift clip codes out of the mantissa to prevent denormalized values when used in float compare.
    // Guardband is able to use a single high-bit with 4 separate LSBs, because it computes a union, rather than intersection, of clipcodes.
#define CLIPCODE_SHIFT 23
    FRUSTUM_LEFT     = (0x01 << CLIPCODE_SHIFT),
    FRUSTUM_TOP      = (0x02 << CLIPCODE_SHIFT),
    FRUSTUM_RIGHT    = (0x04 << CLIPCODE_SHIFT),
    FRUSTUM_BOTTOM   = (0x08 << CLIPCODE_SHIFT),

    FRUSTUM_NEAR     = (0x10 << CLIPCODE_SHIFT),
    FRUSTUM_FAR      = (0x20 << CLIPCODE_SHIFT),

    NEGW             = (0x40 << CLIPCODE_SHIFT),

    // Guardband codes share one high bit and distinguish planes via the 4 LSBs.
    GUARDBAND_LEFT   = (0x80 << CLIPCODE_SHIFT | 0x1),
    GUARDBAND_TOP    = (0x80 << CLIPCODE_SHIFT | 0x2),
    GUARDBAND_RIGHT  = (0x80 << CLIPCODE_SHIFT | 0x4),
    GUARDBAND_BOTTOM = (0x80 << CLIPCODE_SHIFT | 0x8)
};

// All six frustum planes.
#define FRUSTUM_CLIP_MASK (FRUSTUM_LEFT|FRUSTUM_TOP|FRUSTUM_RIGHT|FRUSTUM_BOTTOM|FRUSTUM_NEAR|FRUSTUM_FAR)
// Planes that force actual clipping when the guardband is in use.
#define GUARDBAND_CLIP_MASK (FRUSTUM_NEAR|FRUSTUM_FAR|GUARDBAND_LEFT|GUARDBAND_TOP|GUARDBAND_RIGHT|GUARDBAND_BOTTOM|NEGW)
/// @brief Scalar clip entry point: clips one triangle and its attributes.
/// @param pTriangle     - input triangle positions.
/// @param pAttribs      - input vertex attributes.
/// @param numAttribs    - number of attributes per vertex.
/// @param pOutTriangles - output clipped positions.
/// @param numVerts      - out: number of vertices produced.
/// @param pOutAttribs   - output clipped attributes.
void Clip(const float *pTriangle, const float *pAttribs, int numAttribs, float *pOutTriangles,
          int *numVerts, float *pOutAttribs);
66 void ComputeClipCodes(const API_STATE
& state
, const simdvector
& vertex
, simdscalar
& clipCodes
, simdscalari viewportIndexes
)
68 clipCodes
= _simd_setzero_ps();
71 simdscalar vNegW
= _simd_mul_ps(vertex
.w
, _simd_set1_ps(-1.0f
));
74 simdscalar vRes
= _simd_cmplt_ps(vertex
.x
, vNegW
);
75 clipCodes
= _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_LEFT
)));
78 vRes
= _simd_cmplt_ps(vertex
.y
, vNegW
);
79 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_TOP
))));
82 vRes
= _simd_cmpgt_ps(vertex
.x
, vertex
.w
);
83 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_RIGHT
))));
86 vRes
= _simd_cmpgt_ps(vertex
.y
, vertex
.w
);
87 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_BOTTOM
))));
89 if (state
.rastState
.depthClipEnable
)
92 // DX clips depth [0..w], GL clips [-w..w]
93 if (state
.rastState
.clipHalfZ
)
95 vRes
= _simd_cmplt_ps(vertex
.z
, _simd_setzero_ps());
99 vRes
= _simd_cmplt_ps(vertex
.z
, vNegW
);
101 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_NEAR
))));
104 vRes
= _simd_cmpgt_ps(vertex
.z
, vertex
.w
);
105 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_FAR
))));
109 vRes
= _simd_cmple_ps(vertex
.w
, _simd_setzero_ps());
110 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(NEGW
))));
113 simdscalar gbMult
= _simd_mul_ps(vNegW
, _simd_i32gather_ps(&state
.gbState
.left
[0], viewportIndexes
, 4));
114 vRes
= _simd_cmplt_ps(vertex
.x
, gbMult
);
115 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_LEFT
))));
118 gbMult
= _simd_mul_ps(vNegW
, _simd_i32gather_ps(&state
.gbState
.top
[0], viewportIndexes
, 4));
119 vRes
= _simd_cmplt_ps(vertex
.y
, gbMult
);
120 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_TOP
))));
123 gbMult
= _simd_mul_ps(vertex
.w
, _simd_i32gather_ps(&state
.gbState
.right
[0], viewportIndexes
, 4));
124 vRes
= _simd_cmpgt_ps(vertex
.x
, gbMult
);
125 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_RIGHT
))));
128 gbMult
= _simd_mul_ps(vertex
.w
, _simd_i32gather_ps(&state
.gbState
.bottom
[0], viewportIndexes
, 4));
129 vRes
= _simd_cmpgt_ps(vertex
.y
, gbMult
);
130 clipCodes
= _simd_or_ps(clipCodes
, _simd_and_ps(vRes
, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_BOTTOM
))));
133 template<uint32_t NumVertsPerPrim
>
137 Clipper(uint32_t in_workerId
, DRAW_CONTEXT
* in_pDC
) :
138 workerId(in_workerId
), pDC(in_pDC
), state(GetApiState(in_pDC
))
140 static_assert(NumVertsPerPrim
>= 1 && NumVertsPerPrim
<= 3, "Invalid NumVertsPerPrim");
143 void ComputeClipCodes(simdvector vertex
[], simdscalari viewportIndexes
)
145 for (uint32_t i
= 0; i
< NumVertsPerPrim
; ++i
)
147 ::ComputeClipCodes(this->state
, vertex
[i
], this->clipCodes
[i
], viewportIndexes
);
151 simdscalar
ComputeClipCodeIntersection()
153 simdscalar result
= this->clipCodes
[0];
154 for (uint32_t i
= 1; i
< NumVertsPerPrim
; ++i
)
156 result
= _simd_and_ps(result
, this->clipCodes
[i
]);
161 simdscalar
ComputeClipCodeUnion()
163 simdscalar result
= this->clipCodes
[0];
164 for (uint32_t i
= 1; i
< NumVertsPerPrim
; ++i
)
166 result
= _simd_or_ps(result
, this->clipCodes
[i
]);
171 int ComputeNegWMask()
173 simdscalar clipCodeUnion
= ComputeClipCodeUnion();
174 clipCodeUnion
= _simd_and_ps(clipCodeUnion
, _simd_castsi_ps(_simd_set1_epi32(NEGW
)));
175 return _simd_movemask_ps(_simd_cmpneq_ps(clipCodeUnion
, _simd_setzero_ps()));
178 int ComputeClipMask()
180 simdscalar clipUnion
= ComputeClipCodeUnion();
181 clipUnion
= _simd_and_ps(clipUnion
, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_CLIP_MASK
)));
182 return _simd_movemask_ps(_simd_cmpneq_ps(clipUnion
, _simd_setzero_ps()));
185 // clipper is responsible for culling any prims with NAN coordinates
186 int ComputeNaNMask(simdvector prim
[])
188 simdscalar vNanMask
= _simd_setzero_ps();
189 for (uint32_t e
= 0; e
< NumVertsPerPrim
; ++e
)
191 simdscalar vNan01
= _simd_cmp_ps(prim
[e
].v
[0], prim
[e
].v
[1], _CMP_UNORD_Q
);
192 vNanMask
= _simd_or_ps(vNanMask
, vNan01
);
193 simdscalar vNan23
= _simd_cmp_ps(prim
[e
].v
[2], prim
[e
].v
[3], _CMP_UNORD_Q
);
194 vNanMask
= _simd_or_ps(vNanMask
, vNan23
);
197 return _simd_movemask_ps(vNanMask
);
200 int ComputeUserClipCullMask(PA_STATE
& pa
, simdvector prim
[])
202 uint8_t cullMask
= this->state
.rastState
.cullDistanceMask
;
203 simdscalar vClipCullMask
= _simd_setzero_ps();
206 simdvector vClipCullDistLo
[3];
207 simdvector vClipCullDistHi
[3];
209 pa
.Assemble(VERTEX_CLIPCULL_DIST_LO_SLOT
, vClipCullDistLo
);
210 pa
.Assemble(VERTEX_CLIPCULL_DIST_HI_SLOT
, vClipCullDistHi
);
211 while (_BitScanForward(&index
, cullMask
))
213 cullMask
&= ~(1 << index
);
214 uint32_t slot
= index
>> 2;
215 uint32_t component
= index
& 0x3;
217 simdscalar vCullMaskElem
= _simd_set1_ps(-1.0f
);
218 for (uint32_t e
= 0; e
< NumVertsPerPrim
; ++e
)
220 simdscalar vCullComp
;
223 vCullComp
= vClipCullDistLo
[e
][component
];
227 vCullComp
= vClipCullDistHi
[e
][component
];
230 // cull if cull distance < 0 || NAN
231 simdscalar vCull
= _simd_cmp_ps(_mm256_setzero_ps(), vCullComp
, _CMP_NLE_UQ
);
232 vCullMaskElem
= _simd_and_ps(vCullMaskElem
, vCull
);
234 vClipCullMask
= _simd_or_ps(vClipCullMask
, vCullMaskElem
);
237 // clipper should also discard any primitive with NAN clip distance
238 uint8_t clipMask
= this->state
.rastState
.clipDistanceMask
;
239 while (_BitScanForward(&index
, clipMask
))
241 clipMask
&= ~(1 << index
);
242 uint32_t slot
= index
>> 2;
243 uint32_t component
= index
& 0x3;
245 for (uint32_t e
= 0; e
< NumVertsPerPrim
; ++e
)
247 simdscalar vClipComp
;
250 vClipComp
= vClipCullDistLo
[e
][component
];
254 vClipComp
= vClipCullDistHi
[e
][component
];
257 simdscalar vClip
= _simd_cmp_ps(vClipComp
, vClipComp
, _CMP_UNORD_Q
);
258 vClipCullMask
= _simd_or_ps(vClipCullMask
, vClip
);
262 return _simd_movemask_ps(vClipCullMask
);
265 // clip SIMD primitives
266 void ClipSimd(const simdscalar
& vPrimMask
, const simdscalar
& vClipMask
, PA_STATE
& pa
, const simdscalari
& vPrimId
, const simdscalari
& vViewportIdx
)
268 // input/output vertex store for clipper
269 simdvertex vertices
[7]; // maximum 7 verts generated per triangle
271 LONG constantInterpMask
= this->state
.backendState
.constantInterpolationMask
;
272 uint32_t provokingVertex
= 0;
273 if(pa
.binTopology
== TOP_TRIANGLE_FAN
)
275 provokingVertex
= this->state
.frontendState
.provokingVertex
.triFan
;
277 ///@todo: line topology for wireframe?
280 simdvector tmpVector
[NumVertsPerPrim
];
281 pa
.Assemble(VERTEX_POSITION_SLOT
, tmpVector
);
282 for (uint32_t i
= 0; i
< NumVertsPerPrim
; ++i
)
284 vertices
[i
].attrib
[VERTEX_POSITION_SLOT
] = tmpVector
[i
];
288 const SWR_BACKEND_STATE
& backendState
= this->state
.backendState
;
290 int32_t maxSlot
= -1;
291 for (uint32_t slot
= 0; slot
< backendState
.numAttributes
; ++slot
)
293 // Compute absolute attrib slot in vertex array
294 uint32_t mapSlot
= backendState
.swizzleEnable
? backendState
.swizzleMap
[slot
].sourceAttrib
: slot
;
295 maxSlot
= std::max
<int32_t>(maxSlot
, mapSlot
);
296 uint32_t inputSlot
= VERTEX_ATTRIB_START_SLOT
+ mapSlot
;
298 pa
.Assemble(inputSlot
, tmpVector
);
300 // if constant interpolation enabled for this attribute, assign the provoking
301 // vertex values to all edges
302 if (_bittest(&constantInterpMask
, slot
))
304 for (uint32_t i
= 0; i
< NumVertsPerPrim
; ++i
)
306 vertices
[i
].attrib
[inputSlot
] = tmpVector
[provokingVertex
];
311 for (uint32_t i
= 0; i
< NumVertsPerPrim
; ++i
)
313 vertices
[i
].attrib
[inputSlot
] = tmpVector
[i
];
318 // assemble user clip distances if enabled
319 if (this->state
.rastState
.clipDistanceMask
& 0xf)
321 pa
.Assemble(VERTEX_CLIPCULL_DIST_LO_SLOT
, tmpVector
);
322 for (uint32_t i
= 0; i
< NumVertsPerPrim
; ++i
)
324 vertices
[i
].attrib
[VERTEX_CLIPCULL_DIST_LO_SLOT
] = tmpVector
[i
];
328 if (this->state
.rastState
.clipDistanceMask
& 0xf0)
330 pa
.Assemble(VERTEX_CLIPCULL_DIST_HI_SLOT
, tmpVector
);
331 for (uint32_t i
= 0; i
< NumVertsPerPrim
; ++i
)
333 vertices
[i
].attrib
[VERTEX_CLIPCULL_DIST_HI_SLOT
] = tmpVector
[i
];
337 uint32_t numAttribs
= maxSlot
+ 1;
339 simdscalari vNumClippedVerts
= ClipPrims((float*)&vertices
[0], vPrimMask
, vClipMask
, numAttribs
);
341 // set up new PA for binning clipped primitives
342 PFN_PROCESS_PRIMS pfnBinFunc
= nullptr;
343 PRIMITIVE_TOPOLOGY clipTopology
= TOP_UNKNOWN
;
344 if (NumVertsPerPrim
== 3)
346 pfnBinFunc
= GetBinTrianglesFunc((pa
.pDC
->pState
->state
.rastState
.conservativeRast
> 0));
347 clipTopology
= TOP_TRIANGLE_FAN
;
349 // so that the binner knows to bloat wide points later
350 if (pa
.binTopology
== TOP_POINT_LIST
)
351 clipTopology
= TOP_POINT_LIST
;
354 else if (NumVertsPerPrim
== 2)
356 pfnBinFunc
= BinLines
;
357 clipTopology
= TOP_LINE_LIST
;
361 SWR_ASSERT(0 && "Unexpected points in clipper.");
364 uint32_t* pVertexCount
= (uint32_t*)&vNumClippedVerts
;
365 uint32_t* pPrimitiveId
= (uint32_t*)&vPrimId
;
366 uint32_t* pViewportIdx
= (uint32_t*)&vViewportIdx
;
368 const simdscalari vOffsets
= _mm256_set_epi32(
369 0 * sizeof(simdvertex
), // unused lane
370 6 * sizeof(simdvertex
),
371 5 * sizeof(simdvertex
),
372 4 * sizeof(simdvertex
),
373 3 * sizeof(simdvertex
),
374 2 * sizeof(simdvertex
),
375 1 * sizeof(simdvertex
),
376 0 * sizeof(simdvertex
));
378 // only need to gather 7 verts
379 // @todo dynamic mask based on actual # of verts generated per lane
380 const simdscalar vMask
= _mm256_set_ps(0, -1, -1, -1, -1, -1, -1, -1);
382 uint32_t numClippedPrims
= 0;
383 for (uint32_t inputPrim
= 0; inputPrim
< pa
.NumPrims(); ++inputPrim
)
385 uint32_t numEmittedVerts
= pVertexCount
[inputPrim
];
386 if (numEmittedVerts
< NumVertsPerPrim
)
390 SWR_ASSERT(numEmittedVerts
<= 7, "Unexpected vertex count from clipper.");
392 uint32_t numEmittedPrims
= GetNumPrims(clipTopology
, numEmittedVerts
);
393 numClippedPrims
+= numEmittedPrims
;
395 // tranpose clipper output so that each lane's vertices are in SIMD order
396 // set aside space for 2 vertices, as the PA will try to read up to 16 verts
398 simdvertex transposedPrims
[2];
401 uint8_t* pBase
= (uint8_t*)(&vertices
[0].attrib
[VERTEX_POSITION_SLOT
]) + sizeof(float) * inputPrim
;
402 for (uint32_t c
= 0; c
< 4; ++c
)
404 transposedPrims
[0].attrib
[VERTEX_POSITION_SLOT
][c
] = _simd_mask_i32gather_ps(_mm256_undefined_ps(), (const float*)pBase
, vOffsets
, vMask
, 1);
405 pBase
+= sizeof(simdscalar
);
409 pBase
= (uint8_t*)(&vertices
[0].attrib
[VERTEX_ATTRIB_START_SLOT
]) + sizeof(float) * inputPrim
;
410 for (uint32_t attrib
= 0; attrib
< numAttribs
; ++attrib
)
412 uint32_t attribSlot
= VERTEX_ATTRIB_START_SLOT
+ attrib
;
413 for (uint32_t c
= 0; c
< 4; ++c
)
415 transposedPrims
[0].attrib
[attribSlot
][c
] = _simd_mask_i32gather_ps(_mm256_undefined_ps(), (const float*)pBase
, vOffsets
, vMask
, 1);
416 pBase
+= sizeof(simdscalar
);
420 // transpose user clip distances if enabled
421 if (this->state
.rastState
.clipDistanceMask
& 0xf)
423 pBase
= (uint8_t*)(&vertices
[0].attrib
[VERTEX_CLIPCULL_DIST_LO_SLOT
]) + sizeof(float) * inputPrim
;
424 for (uint32_t c
= 0; c
< 4; ++c
)
426 transposedPrims
[0].attrib
[VERTEX_CLIPCULL_DIST_LO_SLOT
][c
] = _simd_mask_i32gather_ps(_mm256_undefined_ps(), (const float*)pBase
, vOffsets
, vMask
, 1);
427 pBase
+= sizeof(simdscalar
);
431 if (this->state
.rastState
.clipDistanceMask
& 0xf0)
433 pBase
= (uint8_t*)(&vertices
[0].attrib
[VERTEX_CLIPCULL_DIST_HI_SLOT
]) + sizeof(float) * inputPrim
;
434 for (uint32_t c
= 0; c
< 4; ++c
)
436 transposedPrims
[0].attrib
[VERTEX_CLIPCULL_DIST_HI_SLOT
][c
] = _simd_mask_i32gather_ps(_mm256_undefined_ps(), (const float*)pBase
, vOffsets
, vMask
, 1);
437 pBase
+= sizeof(simdscalar
);
441 PA_STATE_OPT
clipPa(this->pDC
, numEmittedPrims
, (uint8_t*)&transposedPrims
[0], numEmittedVerts
, true, clipTopology
);
443 while (clipPa
.GetNextStreamOutput())
447 simdvector attrib
[NumVertsPerPrim
];
448 bool assemble
= clipPa
.Assemble(VERTEX_POSITION_SLOT
, attrib
);
451 static const uint32_t primMaskMap
[] = { 0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
452 pfnBinFunc(this->pDC
, clipPa
, this->workerId
, attrib
, primMaskMap
[numEmittedPrims
], _simd_set1_epi32(pPrimitiveId
[inputPrim
]), _simd_set1_epi32(pViewportIdx
[inputPrim
]));
454 } while (clipPa
.NextPrim());
458 // update global pipeline stat
459 UPDATE_STAT_FE(CPrimitives
, numClippedPrims
);
462 // execute the clipper stage
463 void ExecuteStage(PA_STATE
& pa
, simdvector prim
[], uint32_t primMask
, simdscalari primId
, simdscalari viewportIdx
)
465 SWR_ASSERT(pa
.pDC
!= nullptr);
466 SWR_CONTEXT
* pContext
= pa
.pDC
->pContext
;
468 // set up binner based on PA state
469 PFN_PROCESS_PRIMS pfnBinner
;
470 switch (pa
.binTopology
)
473 pfnBinner
= BinPoints
;
478 case TOP_LINE_LIST_ADJ
:
479 case TOP_LISTSTRIP_ADJ
:
480 pfnBinner
= BinLines
;
483 pfnBinner
= GetBinTrianglesFunc((pa
.pDC
->pState
->state
.rastState
.conservativeRast
> 0));
487 // update clipper invocations pipeline stat
488 uint32_t numInvoc
= _mm_popcnt_u32(primMask
);
489 UPDATE_STAT_FE(CInvocations
, numInvoc
);
491 ComputeClipCodes(prim
, viewportIdx
);
493 // cull prims with NAN coords
494 primMask
&= ~ComputeNaNMask(prim
);
496 // user cull distance cull
497 if (this->state
.rastState
.cullDistanceMask
)
499 primMask
&= ~ComputeUserClipCullMask(pa
, prim
);
502 // cull prims outside view frustum
503 simdscalar clipIntersection
= ComputeClipCodeIntersection();
504 int validMask
= primMask
& _simd_movemask_ps(_simd_cmpeq_ps(clipIntersection
, _simd_setzero_ps()));
506 // skip clipping for points
507 uint32_t clipMask
= 0;
508 if (NumVertsPerPrim
!= 1)
510 clipMask
= primMask
& ComputeClipMask();
515 AR_BEGIN(FEGuardbandClip
, pa
.pDC
->drawId
);
516 // we have to clip tris, execute the clipper, which will also
518 ClipSimd(vMask(primMask
), vMask(clipMask
), pa
, primId
, viewportIdx
);
519 AR_END(FEGuardbandClip
, 1);
523 // update CPrimitives pipeline state
524 UPDATE_STAT_FE(CPrimitives
, _mm_popcnt_u32(validMask
));
526 // forward valid prims directly to binner
527 pfnBinner(this->pDC
, pa
, this->workerId
, prim
, validMask
, primId
, viewportIdx
);
532 inline simdscalar
ComputeInterpFactor(simdscalar boundaryCoord0
, simdscalar boundaryCoord1
)
534 return _simd_div_ps(boundaryCoord0
, _simd_sub_ps(boundaryCoord0
, boundaryCoord1
));
537 inline simdscalari
ComputeOffsets(uint32_t attrib
, simdscalari vIndices
, uint32_t component
)
539 const uint32_t simdVertexStride
= sizeof(simdvertex
);
540 const uint32_t componentStride
= sizeof(simdscalar
);
541 const uint32_t attribStride
= sizeof(simdvector
);
542 const __m256i vElemOffset
= _mm256_set_epi32(7 * sizeof(float), 6 * sizeof(float), 5 * sizeof(float), 4 * sizeof(float),
543 3 * sizeof(float), 2 * sizeof(float), 1 * sizeof(float), 0 * sizeof(float));
545 // step to the simdvertex
546 simdscalari vOffsets
= _simd_mullo_epi32(vIndices
, _simd_set1_epi32(simdVertexStride
));
548 // step to the attribute and component
549 vOffsets
= _simd_add_epi32(vOffsets
, _simd_set1_epi32(attribStride
* attrib
+ componentStride
* component
));
552 vOffsets
= _simd_add_epi32(vOffsets
, vElemOffset
);
557 // gathers a single component for a given attribute for each SIMD lane
558 inline simdscalar
GatherComponent(const float* pBuffer
, uint32_t attrib
, simdscalar vMask
, simdscalari vIndices
, uint32_t component
)
560 simdscalari vOffsets
= ComputeOffsets(attrib
, vIndices
, component
);
561 simdscalar vSrc
= _mm256_undefined_ps();
562 return _simd_mask_i32gather_ps(vSrc
, pBuffer
, vOffsets
, vMask
, 1);
565 inline void ScatterComponent(const float* pBuffer
, uint32_t attrib
, simdscalar vMask
, simdscalari vIndices
, uint32_t component
, simdscalar vSrc
)
567 simdscalari vOffsets
= ComputeOffsets(attrib
, vIndices
, component
);
569 uint32_t* pOffsets
= (uint32_t*)&vOffsets
;
570 float* pSrc
= (float*)&vSrc
;
571 uint32_t mask
= _simd_movemask_ps(vMask
);
573 while (_BitScanForward(&lane
, mask
))
575 mask
&= ~(1 << lane
);
576 uint8_t* pBuf
= (uint8_t*)pBuffer
+ pOffsets
[lane
];
577 *(float*)pBuf
= pSrc
[lane
];
581 template<SWR_CLIPCODES ClippingPlane
>
582 inline void intersect(
583 const simdscalar
& vActiveMask
, // active lanes to operate on
584 const simdscalari
& s
, // index to first edge vertex v0 in pInPts.
585 const simdscalari
& p
, // index to second edge vertex v1 in pInPts.
586 const simdvector
& v1
, // vertex 0 position
587 const simdvector
& v2
, // vertex 1 position
588 simdscalari
& outIndex
, // output index.
589 const float *pInVerts
, // array of all the input positions.
590 uint32_t numInAttribs
, // number of attributes per vertex.
591 float *pOutVerts
) // array of output positions. We'll write our new intersection point at i*4.
593 // compute interpolation factor
595 switch (ClippingPlane
)
597 case FRUSTUM_LEFT
: t
= ComputeInterpFactor(_simd_add_ps(v1
[3], v1
[0]), _simd_add_ps(v2
[3], v2
[0])); break;
598 case FRUSTUM_RIGHT
: t
= ComputeInterpFactor(_simd_sub_ps(v1
[3], v1
[0]), _simd_sub_ps(v2
[3], v2
[0])); break;
599 case FRUSTUM_TOP
: t
= ComputeInterpFactor(_simd_add_ps(v1
[3], v1
[1]), _simd_add_ps(v2
[3], v2
[1])); break;
600 case FRUSTUM_BOTTOM
: t
= ComputeInterpFactor(_simd_sub_ps(v1
[3], v1
[1]), _simd_sub_ps(v2
[3], v2
[1])); break;
602 // DX Znear plane is 0, GL is -w
603 if (this->state
.rastState
.clipHalfZ
)
605 t
= ComputeInterpFactor(v1
[2], v2
[2]);
609 t
= ComputeInterpFactor(_simd_add_ps(v1
[3], v1
[2]), _simd_add_ps(v2
[3], v2
[2]));
612 case FRUSTUM_FAR
: t
= ComputeInterpFactor(_simd_sub_ps(v1
[3], v1
[2]), _simd_sub_ps(v2
[3], v2
[2])); break;
613 default: SWR_ASSERT(false, "invalid clipping plane: %d", ClippingPlane
);
616 // interpolate position and store
617 for (uint32_t c
= 0; c
< 4; ++c
)
619 simdscalar vOutPos
= _simd_fmadd_ps(_simd_sub_ps(v2
[c
], v1
[c
]), t
, v1
[c
]);
620 ScatterComponent(pOutVerts
, VERTEX_POSITION_SLOT
, vActiveMask
, outIndex
, c
, vOutPos
);
623 // interpolate attributes and store
624 for (uint32_t a
= 0; a
< numInAttribs
; ++a
)
626 uint32_t attribSlot
= VERTEX_ATTRIB_START_SLOT
+ a
;
627 for (uint32_t c
= 0; c
< 4; ++c
)
629 simdscalar vAttrib0
= GatherComponent(pInVerts
, attribSlot
, vActiveMask
, s
, c
);
630 simdscalar vAttrib1
= GatherComponent(pInVerts
, attribSlot
, vActiveMask
, p
, c
);
631 simdscalar vOutAttrib
= _simd_fmadd_ps(_simd_sub_ps(vAttrib1
, vAttrib0
), t
, vAttrib0
);
632 ScatterComponent(pOutVerts
, attribSlot
, vActiveMask
, outIndex
, c
, vOutAttrib
);
636 // interpolate clip distance if enabled
637 if (this->state
.rastState
.clipDistanceMask
& 0xf)
639 uint32_t attribSlot
= VERTEX_CLIPCULL_DIST_LO_SLOT
;
640 for (uint32_t c
= 0; c
< 4; ++c
)
642 simdscalar vAttrib0
= GatherComponent(pInVerts
, attribSlot
, vActiveMask
, s
, c
);
643 simdscalar vAttrib1
= GatherComponent(pInVerts
, attribSlot
, vActiveMask
, p
, c
);
644 simdscalar vOutAttrib
= _simd_fmadd_ps(_simd_sub_ps(vAttrib1
, vAttrib0
), t
, vAttrib0
);
645 ScatterComponent(pOutVerts
, attribSlot
, vActiveMask
, outIndex
, c
, vOutAttrib
);
649 if (this->state
.rastState
.clipDistanceMask
& 0xf0)
651 uint32_t attribSlot
= VERTEX_CLIPCULL_DIST_HI_SLOT
;
652 for (uint32_t c
= 0; c
< 4; ++c
)
654 simdscalar vAttrib0
= GatherComponent(pInVerts
, attribSlot
, vActiveMask
, s
, c
);
655 simdscalar vAttrib1
= GatherComponent(pInVerts
, attribSlot
, vActiveMask
, p
, c
);
656 simdscalar vOutAttrib
= _simd_fmadd_ps(_simd_sub_ps(vAttrib1
, vAttrib0
), t
, vAttrib0
);
657 ScatterComponent(pOutVerts
, attribSlot
, vActiveMask
, outIndex
, c
, vOutAttrib
);
662 template<SWR_CLIPCODES ClippingPlane
>
663 inline simdscalar
inside(const simdvector
& v
)
665 switch (ClippingPlane
)
667 case FRUSTUM_LEFT
: return _simd_cmpge_ps(v
[0], _simd_mul_ps(v
[3], _simd_set1_ps(-1.0f
)));
668 case FRUSTUM_RIGHT
: return _simd_cmple_ps(v
[0], v
[3]);
669 case FRUSTUM_TOP
: return _simd_cmpge_ps(v
[1], _simd_mul_ps(v
[3], _simd_set1_ps(-1.0f
)));
670 case FRUSTUM_BOTTOM
: return _simd_cmple_ps(v
[1], v
[3]);
671 case FRUSTUM_NEAR
: return _simd_cmpge_ps(v
[2], this->state
.rastState
.clipHalfZ
? _simd_setzero_ps() : _simd_mul_ps(v
[3], _simd_set1_ps(-1.0f
)));
672 case FRUSTUM_FAR
: return _simd_cmple_ps(v
[2], v
[3]);
674 SWR_ASSERT(false, "invalid clipping plane: %d", ClippingPlane
);
675 return _simd_setzero_ps();
679 template<SWR_CLIPCODES ClippingPlane
>
680 simdscalari
ClipTriToPlane(const float* pInVerts
, const simdscalari
& vNumInPts
, uint32_t numInAttribs
, float* pOutVerts
)
682 simdscalari vCurIndex
= _simd_setzero_si();
683 simdscalari vOutIndex
= _simd_setzero_si();
684 simdscalar vActiveMask
= _simd_castsi_ps(_simd_cmplt_epi32(vCurIndex
, vNumInPts
));
686 while (!_simd_testz_ps(vActiveMask
, vActiveMask
)) // loop until activeMask is empty
688 simdscalari s
= vCurIndex
;
689 simdscalari p
= _simd_add_epi32(s
, _simd_set1_epi32(1));
690 simdscalari underFlowMask
= _simd_cmpgt_epi32(vNumInPts
, p
);
691 p
= _simd_castps_si(_simd_blendv_ps(_simd_setzero_ps(), _simd_castsi_ps(p
), _simd_castsi_ps(underFlowMask
)));
694 simdvector vInPos0
, vInPos1
;
695 for (uint32_t c
= 0; c
< 4; ++c
)
697 vInPos0
[c
] = GatherComponent(pInVerts
, VERTEX_POSITION_SLOT
, vActiveMask
, s
, c
);
698 vInPos1
[c
] = GatherComponent(pInVerts
, VERTEX_POSITION_SLOT
, vActiveMask
, p
, c
);
701 // compute inside mask
702 simdscalar s_in
= inside
<ClippingPlane
>(vInPos0
);
703 simdscalar p_in
= inside
<ClippingPlane
>(vInPos1
);
705 // compute intersection mask (s_in != p_in)
706 simdscalar intersectMask
= _simd_xor_ps(s_in
, p_in
);
707 intersectMask
= _simd_and_ps(intersectMask
, vActiveMask
);
710 s_in
= _simd_and_ps(s_in
, vActiveMask
);
711 if (!_simd_testz_ps(s_in
, s_in
))
714 for (uint32_t c
= 0; c
< 4; ++c
)
716 ScatterComponent(pOutVerts
, VERTEX_POSITION_SLOT
, s_in
, vOutIndex
, c
, vInPos0
[c
]);
720 for (uint32_t a
= 0; a
< numInAttribs
; ++a
)
722 uint32_t attribSlot
= VERTEX_ATTRIB_START_SLOT
+ a
;
723 for (uint32_t c
= 0; c
< 4; ++c
)
725 simdscalar vAttrib
= GatherComponent(pInVerts
, attribSlot
, s_in
, s
, c
);
726 ScatterComponent(pOutVerts
, attribSlot
, s_in
, vOutIndex
, c
, vAttrib
);
730 // store clip distance if enabled
731 if (this->state
.rastState
.clipDistanceMask
& 0xf)
733 uint32_t attribSlot
= VERTEX_CLIPCULL_DIST_LO_SLOT
;
734 for (uint32_t c
= 0; c
< 4; ++c
)
736 simdscalar vAttrib
= GatherComponent(pInVerts
, attribSlot
, s_in
, s
, c
);
737 ScatterComponent(pOutVerts
, attribSlot
, s_in
, vOutIndex
, c
, vAttrib
);
741 if (this->state
.rastState
.clipDistanceMask
& 0xf0)
743 uint32_t attribSlot
= VERTEX_CLIPCULL_DIST_HI_SLOT
;
744 for (uint32_t c
= 0; c
< 4; ++c
)
746 simdscalar vAttrib
= GatherComponent(pInVerts
, attribSlot
, s_in
, s
, c
);
747 ScatterComponent(pOutVerts
, attribSlot
, s_in
, vOutIndex
, c
, vAttrib
);
751 // increment outIndex
752 vOutIndex
= _simd_blendv_epi32(vOutIndex
, _simd_add_epi32(vOutIndex
, _simd_set1_epi32(1)), s_in
);
755 // compute and store intersection
756 if (!_simd_testz_ps(intersectMask
, intersectMask
))
758 intersect
<ClippingPlane
>(intersectMask
, s
, p
, vInPos0
, vInPos1
, vOutIndex
, pInVerts
, numInAttribs
, pOutVerts
);
760 // increment outIndex for active lanes
761 vOutIndex
= _simd_blendv_epi32(vOutIndex
, _simd_add_epi32(vOutIndex
, _simd_set1_epi32(1)), intersectMask
);
764 // increment loop index and update active mask
765 vCurIndex
= _simd_add_epi32(vCurIndex
, _simd_set1_epi32(1));
766 vActiveMask
= _simd_castsi_ps(_simd_cmplt_epi32(vCurIndex
, vNumInPts
));
772 template<SWR_CLIPCODES ClippingPlane
>
773 simdscalari
ClipLineToPlane(const float* pInVerts
, const simdscalari
& vNumInPts
, uint32_t numInAttribs
, float* pOutVerts
)
775 simdscalari vCurIndex
= _simd_setzero_si();
776 simdscalari vOutIndex
= _simd_setzero_si();
777 simdscalar vActiveMask
= _simd_castsi_ps(_simd_cmplt_epi32(vCurIndex
, vNumInPts
));
779 if (!_simd_testz_ps(vActiveMask
, vActiveMask
))
781 simdscalari s
= vCurIndex
;
782 simdscalari p
= _simd_add_epi32(s
, _simd_set1_epi32(1));
785 simdvector vInPos0
, vInPos1
;
786 for (uint32_t c
= 0; c
< 4; ++c
)
788 vInPos0
[c
] = GatherComponent(pInVerts
, VERTEX_POSITION_SLOT
, vActiveMask
, s
, c
);
789 vInPos1
[c
] = GatherComponent(pInVerts
, VERTEX_POSITION_SLOT
, vActiveMask
, p
, c
);
792 // compute inside mask
793 simdscalar s_in
= inside
<ClippingPlane
>(vInPos0
);
794 simdscalar p_in
= inside
<ClippingPlane
>(vInPos1
);
796 // compute intersection mask (s_in != p_in)
797 simdscalar intersectMask
= _simd_xor_ps(s_in
, p_in
);
798 intersectMask
= _simd_and_ps(intersectMask
, vActiveMask
);
801 s_in
= _simd_and_ps(s_in
, vActiveMask
);
802 if (!_simd_testz_ps(s_in
, s_in
))
804 for (uint32_t c
= 0; c
< 4; ++c
)
806 ScatterComponent(pOutVerts
, VERTEX_POSITION_SLOT
, s_in
, vOutIndex
, c
, vInPos0
[c
]);
809 // interpolate attributes and store
810 for (uint32_t a
= 0; a
< numInAttribs
; ++a
)
812 uint32_t attribSlot
= VERTEX_ATTRIB_START_SLOT
+ a
;
813 for (uint32_t c
= 0; c
< 4; ++c
)
815 simdscalar vAttrib
= GatherComponent(pInVerts
, attribSlot
, s_in
, s
, c
);
816 ScatterComponent(pOutVerts
, attribSlot
, s_in
, vOutIndex
, c
, vAttrib
);
820 // increment outIndex
821 vOutIndex
= _simd_blendv_epi32(vOutIndex
, _simd_add_epi32(vOutIndex
, _simd_set1_epi32(1)), s_in
);
824 // compute and store intersection
825 if (!_simd_testz_ps(intersectMask
, intersectMask
))
827 intersect
<ClippingPlane
>(intersectMask
, s
, p
, vInPos0
, vInPos1
, vOutIndex
, pInVerts
, numInAttribs
, pOutVerts
);
829 // increment outIndex for active lanes
830 vOutIndex
= _simd_blendv_epi32(vOutIndex
, _simd_add_epi32(vOutIndex
, _simd_set1_epi32(1)), intersectMask
);
834 p_in
= _simd_and_ps(p_in
, vActiveMask
);
835 if (!_simd_testz_ps(p_in
, p_in
))
837 for (uint32_t c
= 0; c
< 4; ++c
)
839 ScatterComponent(pOutVerts
, VERTEX_POSITION_SLOT
, p_in
, vOutIndex
, c
, vInPos1
[c
]);
842 // interpolate attributes and store
843 for (uint32_t a
= 0; a
< numInAttribs
; ++a
)
845 uint32_t attribSlot
= VERTEX_ATTRIB_START_SLOT
+ a
;
846 for (uint32_t c
= 0; c
< 4; ++c
)
848 simdscalar vAttrib
= GatherComponent(pInVerts
, attribSlot
, p_in
, p
, c
);
849 ScatterComponent(pOutVerts
, attribSlot
, p_in
, vOutIndex
, c
, vAttrib
);
853 // increment outIndex
854 vOutIndex
= _simd_blendv_epi32(vOutIndex
, _simd_add_epi32(vOutIndex
, _simd_set1_epi32(1)), p_in
);
861 //////////////////////////////////////////////////////////////////////////
862 /// @brief Vertical clipper. Clips SIMD primitives at a time
863 /// @param pVertices - pointer to vertices in SOA form. Clipper will read input and write results to this buffer
864 /// @param vPrimMask - mask of valid input primitives, including non-clipped prims
865 /// @param numAttribs - number of valid input attribs, including position
866 simdscalari
ClipPrims(float* pVertices
, const simdscalar
& vPrimMask
, const simdscalar
& vClipMask
, int numAttribs
)
869 float* pTempVerts
= (float*)&tlsTempVertices
[0];
871 // zero out num input verts for non-active lanes
872 simdscalari vNumInPts
= _simd_set1_epi32(NumVertsPerPrim
);
873 vNumInPts
= _simd_blendv_epi32(_simd_setzero_si(), vNumInPts
, vClipMask
);
875 // clip prims to frustum
876 simdscalari vNumOutPts
;
877 if (NumVertsPerPrim
== 3)
879 vNumOutPts
= ClipTriToPlane
<FRUSTUM_NEAR
>(pVertices
, vNumInPts
, numAttribs
, pTempVerts
);
880 vNumOutPts
= ClipTriToPlane
<FRUSTUM_FAR
>(pTempVerts
, vNumOutPts
, numAttribs
, pVertices
);
881 vNumOutPts
= ClipTriToPlane
<FRUSTUM_LEFT
>(pVertices
, vNumOutPts
, numAttribs
, pTempVerts
);
882 vNumOutPts
= ClipTriToPlane
<FRUSTUM_RIGHT
>(pTempVerts
, vNumOutPts
, numAttribs
, pVertices
);
883 vNumOutPts
= ClipTriToPlane
<FRUSTUM_BOTTOM
>(pVertices
, vNumOutPts
, numAttribs
, pTempVerts
);
884 vNumOutPts
= ClipTriToPlane
<FRUSTUM_TOP
>(pTempVerts
, vNumOutPts
, numAttribs
, pVertices
);
888 SWR_ASSERT(NumVertsPerPrim
== 2);
889 vNumOutPts
= ClipLineToPlane
<FRUSTUM_NEAR
>(pVertices
, vNumInPts
, numAttribs
, pTempVerts
);
890 vNumOutPts
= ClipLineToPlane
<FRUSTUM_FAR
>(pTempVerts
, vNumOutPts
, numAttribs
, pVertices
);
891 vNumOutPts
= ClipLineToPlane
<FRUSTUM_LEFT
>(pVertices
, vNumOutPts
, numAttribs
, pTempVerts
);
892 vNumOutPts
= ClipLineToPlane
<FRUSTUM_RIGHT
>(pTempVerts
, vNumOutPts
, numAttribs
, pVertices
);
893 vNumOutPts
= ClipLineToPlane
<FRUSTUM_BOTTOM
>(pVertices
, vNumOutPts
, numAttribs
, pTempVerts
);
894 vNumOutPts
= ClipLineToPlane
<FRUSTUM_TOP
>(pTempVerts
, vNumOutPts
, numAttribs
, pVertices
);
897 // restore num verts for non-clipped, active lanes
898 simdscalar vNonClippedMask
= _simd_andnot_ps(vClipMask
, vPrimMask
);
899 vNumOutPts
= _simd_blendv_epi32(vNumOutPts
, _simd_set1_epi32(NumVertsPerPrim
), vNonClippedMask
);
904 const uint32_t workerId
{ 0 };
905 DRAW_CONTEXT
* pDC
{ nullptr };
906 const API_STATE
& state
;
907 simdscalar clipCodes
[NumVertsPerPrim
];
911 // pipeline stage functions
912 void ClipTriangles(DRAW_CONTEXT
*pDC
, PA_STATE
& pa
, uint32_t workerId
, simdvector prims
[], uint32_t primMask
, simdscalari primId
, simdscalari viewportIdx
);
913 void ClipLines(DRAW_CONTEXT
*pDC
, PA_STATE
& pa
, uint32_t workerId
, simdvector prims
[], uint32_t primMask
, simdscalari primId
, simdscalari viewportIdx
);
914 void ClipPoints(DRAW_CONTEXT
*pDC
, PA_STATE
& pa
, uint32_t workerId
, simdvector prims
[], uint32_t primMask
, simdscalari primId
, simdscalari viewportIdx
);