swr: don't claim to allow setting layer/viewport from VS
[mesa.git] / src / gallium / drivers / swr / rasterizer / core / clip.h
1 /****************************************************************************
2 * Copyright (C) 2014-2015 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * @file clip.h
24 *
25 * @brief Definitions for clipping
26 *
27 ******************************************************************************/
28 #pragma once
29
30 #include "common/simdintrin.h"
31 #include "core/context.h"
32 #include "core/pa.h"
33 #include "rdtsc_core.h"
34
35 // Temp storage used by the clipper
36 extern THREAD simdvertex tlsTempVertices[7];
37
// Per-vertex outcodes for frustum and guardband planes.
// Values are bit flags; a vertex may carry several at once.
enum SWR_CLIPCODES
{
    // Shift clip codes out of the mantissa to prevent denormalized values when used in float compare.
    // Guardband is able to use a single high-bit with 4 separate LSBs, because it computes a union, rather than intersection, of clipcodes.
#define CLIPCODE_SHIFT 23
    FRUSTUM_LEFT    = (0x01 << CLIPCODE_SHIFT),
    FRUSTUM_TOP     = (0x02 << CLIPCODE_SHIFT),
    FRUSTUM_RIGHT   = (0x04 << CLIPCODE_SHIFT),
    FRUSTUM_BOTTOM  = (0x08 << CLIPCODE_SHIFT),

    FRUSTUM_NEAR    = (0x10 << CLIPCODE_SHIFT),
    FRUSTUM_FAR     = (0x20 << CLIPCODE_SHIFT),

    // set when vertex.w <= 0 (behind the eye)
    NEGW            = (0x40 << CLIPCODE_SHIFT),

    // guardband codes share one high bit plus a distinguishing low nibble
    GUARDBAND_LEFT   = (0x80 << CLIPCODE_SHIFT | 0x1),
    GUARDBAND_TOP    = (0x80 << CLIPCODE_SHIFT | 0x2),
    GUARDBAND_RIGHT  = (0x80 << CLIPCODE_SHIFT | 0x4),
    GUARDBAND_BOTTOM = (0x80 << CLIPCODE_SHIFT | 0x8)
};
58
59 #define FRUSTUM_CLIP_MASK (FRUSTUM_LEFT|FRUSTUM_TOP|FRUSTUM_RIGHT|FRUSTUM_BOTTOM|FRUSTUM_NEAR|FRUSTUM_FAR)
60 #define GUARDBAND_CLIP_MASK (FRUSTUM_NEAR|FRUSTUM_FAR|GUARDBAND_LEFT|GUARDBAND_TOP|GUARDBAND_RIGHT|GUARDBAND_BOTTOM|NEGW)
61
62 void Clip(const float *pTriangle, const float *pAttribs, int numAttribs, float *pOutTriangles,
63 int *numVerts, float *pOutAttribs);
64
//////////////////////////////////////////////////////////////////////////
/// @brief Computes per-lane clip codes for one SIMD vertex.
/// @param state - current API state (rasterizer flags, guardband values)
/// @param vertex - SIMD vertex position (clip space x,y,z,w)
/// @param clipCodes - output; each lane holds the OR of the SWR_CLIPCODES
///        bits set for that lane's vertex (stored as float bit patterns)
/// @param viewportIndexes - per-lane viewport index used to gather the
///        matching guardband extents
INLINE
void ComputeClipCodes(const API_STATE& state, const simdvector& vertex, simdscalar& clipCodes, simdscalari viewportIndexes)
{
    clipCodes = _simd_setzero_ps();

    // -w
    simdscalar vNegW = _simd_mul_ps(vertex.w, _simd_set1_ps(-1.0f));

    // FRUSTUM_LEFT: x < -w
    simdscalar vRes = _simd_cmplt_ps(vertex.x, vNegW);
    clipCodes = _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_LEFT)));

    // FRUSTUM_TOP: y < -w
    vRes = _simd_cmplt_ps(vertex.y, vNegW);
    clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_TOP))));

    // FRUSTUM_RIGHT: x > w
    vRes = _simd_cmpgt_ps(vertex.x, vertex.w);
    clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_RIGHT))));

    // FRUSTUM_BOTTOM: y > w
    vRes = _simd_cmpgt_ps(vertex.y, vertex.w);
    clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_BOTTOM))));

    if (state.rastState.depthClipEnable)
    {
        // FRUSTUM_NEAR
        // DX clips depth [0..w], GL clips [-w..w]
        if (state.rastState.clipHalfZ)
        {
            vRes = _simd_cmplt_ps(vertex.z, _simd_setzero_ps());
        }
        else
        {
            vRes = _simd_cmplt_ps(vertex.z, vNegW);
        }
        clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_NEAR))));

        // FRUSTUM_FAR: z > w
        vRes = _simd_cmpgt_ps(vertex.z, vertex.w);
        clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(FRUSTUM_FAR))));
    }

    // NEGW: w <= 0
    vRes = _simd_cmple_ps(vertex.w, _simd_setzero_ps());
    clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(NEGW))));

    // Guardband tests gather the per-viewport guardband extents and scale by +/-w.

    // GUARDBAND_LEFT
    simdscalar gbMult = _simd_mul_ps(vNegW, _simd_i32gather_ps(&state.gbState.left[0], viewportIndexes, 4));
    vRes = _simd_cmplt_ps(vertex.x, gbMult);
    clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_LEFT))));

    // GUARDBAND_TOP
    gbMult = _simd_mul_ps(vNegW, _simd_i32gather_ps(&state.gbState.top[0], viewportIndexes, 4));
    vRes = _simd_cmplt_ps(vertex.y, gbMult);
    clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_TOP))));

    // GUARDBAND_RIGHT
    gbMult = _simd_mul_ps(vertex.w, _simd_i32gather_ps(&state.gbState.right[0], viewportIndexes, 4));
    vRes = _simd_cmpgt_ps(vertex.x, gbMult);
    clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_RIGHT))));

    // GUARDBAND_BOTTOM
    gbMult = _simd_mul_ps(vertex.w, _simd_i32gather_ps(&state.gbState.bottom[0], viewportIndexes, 4));
    vRes = _simd_cmpgt_ps(vertex.y, gbMult);
    clipCodes = _simd_or_ps(clipCodes, _simd_and_ps(vRes, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_BOTTOM))));
}
132
133 template<uint32_t NumVertsPerPrim>
134 class Clipper
135 {
136 public:
    // Binds the clipper to a worker thread and draw context; caches a
    // reference to the draw's API state. Only 1/2/3-vertex primitives
    // (points/lines/tris) are supported, enforced at compile time.
    Clipper(uint32_t in_workerId, DRAW_CONTEXT* in_pDC) :
        workerId(in_workerId), pDC(in_pDC), state(GetApiState(in_pDC))
    {
        static_assert(NumVertsPerPrim >= 1 && NumVertsPerPrim <= 3, "Invalid NumVertsPerPrim");
    }
142
143 void ComputeClipCodes(simdvector vertex[], simdscalari viewportIndexes)
144 {
145 for (uint32_t i = 0; i < NumVertsPerPrim; ++i)
146 {
147 ::ComputeClipCodes(this->state, vertex[i], this->clipCodes[i], viewportIndexes);
148 }
149 }
150
151 simdscalar ComputeClipCodeIntersection()
152 {
153 simdscalar result = this->clipCodes[0];
154 for (uint32_t i = 1; i < NumVertsPerPrim; ++i)
155 {
156 result = _simd_and_ps(result, this->clipCodes[i]);
157 }
158 return result;
159 }
160
161 simdscalar ComputeClipCodeUnion()
162 {
163 simdscalar result = this->clipCodes[0];
164 for (uint32_t i = 1; i < NumVertsPerPrim; ++i)
165 {
166 result = _simd_or_ps(result, this->clipCodes[i]);
167 }
168 return result;
169 }
170
171 int ComputeNegWMask()
172 {
173 simdscalar clipCodeUnion = ComputeClipCodeUnion();
174 clipCodeUnion = _simd_and_ps(clipCodeUnion, _simd_castsi_ps(_simd_set1_epi32(NEGW)));
175 return _simd_movemask_ps(_simd_cmpneq_ps(clipCodeUnion, _simd_setzero_ps()));
176 }
177
178 int ComputeClipMask()
179 {
180 simdscalar clipUnion = ComputeClipCodeUnion();
181 clipUnion = _simd_and_ps(clipUnion, _simd_castsi_ps(_simd_set1_epi32(GUARDBAND_CLIP_MASK)));
182 return _simd_movemask_ps(_simd_cmpneq_ps(clipUnion, _simd_setzero_ps()));
183 }
184
185 // clipper is responsible for culling any prims with NAN coordinates
186 int ComputeNaNMask(simdvector prim[])
187 {
188 simdscalar vNanMask = _simd_setzero_ps();
189 for (uint32_t e = 0; e < NumVertsPerPrim; ++e)
190 {
191 simdscalar vNan01 = _simd_cmp_ps(prim[e].v[0], prim[e].v[1], _CMP_UNORD_Q);
192 vNanMask = _simd_or_ps(vNanMask, vNan01);
193 simdscalar vNan23 = _simd_cmp_ps(prim[e].v[2], prim[e].v[3], _CMP_UNORD_Q);
194 vNanMask = _simd_or_ps(vNanMask, vNan23);
195 }
196
197 return _simd_movemask_ps(vNanMask);
198 }
199
    // Returns a lane mask of primitives discarded by user cull distances
    // (all vertices' distance < 0 or NaN for some enabled component) or
    // carrying a NaN user clip distance on any vertex.
    // Each mask bit index encodes slot (bits 3:2) and component (bits 1:0).
    int ComputeUserClipCullMask(PA_STATE& pa, simdvector prim[])
    {
        uint8_t cullMask = this->state.rastState.cullDistanceMask;
        simdscalar vClipCullMask = _simd_setzero_ps();
        DWORD index;

        simdvector vClipCullDistLo[3];
        simdvector vClipCullDistHi[3];

        pa.Assemble(VERTEX_CLIPCULL_DIST_LO_SLOT, vClipCullDistLo);
        pa.Assemble(VERTEX_CLIPCULL_DIST_HI_SLOT, vClipCullDistHi);
        while (_BitScanForward(&index, cullMask))
        {
            cullMask &= ~(1 << index);
            uint32_t slot = index >> 2;
            uint32_t component = index & 0x3;

            // start all-ones; AND requires every vertex to be culled for the
            // primitive to be culled
            simdscalar vCullMaskElem = _simd_set1_ps(-1.0f);
            for (uint32_t e = 0; e < NumVertsPerPrim; ++e)
            {
                simdscalar vCullComp;
                if (slot == 0)
                {
                    vCullComp = vClipCullDistLo[e][component];
                }
                else
                {
                    vCullComp = vClipCullDistHi[e][component];
                }

                // cull if cull distance < 0 || NAN
                // _CMP_NLE_UQ: !(0 <= dist), true for dist < 0 and for NaN
                simdscalar vCull = _simd_cmp_ps(_mm256_setzero_ps(), vCullComp, _CMP_NLE_UQ);
                vCullMaskElem = _simd_and_ps(vCullMaskElem, vCull);
            }
            vClipCullMask = _simd_or_ps(vClipCullMask, vCullMaskElem);
        }

        // clipper should also discard any primitive with NAN clip distance
        uint8_t clipMask = this->state.rastState.clipDistanceMask;
        while (_BitScanForward(&index, clipMask))
        {
            clipMask &= ~(1 << index);
            uint32_t slot = index >> 2;
            uint32_t component = index & 0x3;

            for (uint32_t e = 0; e < NumVertsPerPrim; ++e)
            {
                simdscalar vClipComp;
                if (slot == 0)
                {
                    vClipComp = vClipCullDistLo[e][component];
                }
                else
                {
                    vClipComp = vClipCullDistHi[e][component];
                }

                // self-compare unordered: true only for NaN
                simdscalar vClip = _simd_cmp_ps(vClipComp, vClipComp, _CMP_UNORD_Q);
                vClipCullMask = _simd_or_ps(vClipCullMask, vClip);
            }
        }

        return _simd_movemask_ps(vClipCullMask);
    }
264
    // clip a single primitive
    // Transposes one triangle's position/attributes out of SIMD order into
    // scalar AOS buffers, then invokes the scalar Clip() routine.
    // Returns the number of vertices produced by clipping.
    int ClipScalar(PA_STATE& pa, uint32_t primIndex, float* pOutPos, float* pOutAttribs)
    {
        OSALIGNSIMD(float) inVerts[3 * 4];
        OSALIGNSIMD(float) inAttribs[3 * KNOB_NUM_ATTRIBUTES * 4];

        // transpose primitive position
        __m128 verts[3];
        pa.AssembleSingle(VERTEX_POSITION_SLOT, primIndex, verts);
        _mm_store_ps(&inVerts[0], verts[0]);
        _mm_store_ps(&inVerts[4], verts[1]);
        _mm_store_ps(&inVerts[8], verts[2]);

        // transpose attribs
        uint32_t numScalarAttribs = this->state.linkageCount * 4;

        int idx = 0;
        DWORD slot = 0;
        uint32_t mapIdx = 0;
        uint32_t tmpLinkage = uint32_t(this->state.linkageMask);
        // iterate over set bits of the linkage mask, lowest first
        while (_BitScanForward(&slot, tmpLinkage))
        {
            tmpLinkage &= ~(1 << slot);
            // Compute absolute attrib slot in vertex array
            uint32_t inputSlot = VERTEX_ATTRIB_START_SLOT + this->state.linkageMap[mapIdx++];
            __m128 attrib[3]; // triangle attribs (always 4 wide)
            pa.AssembleSingle(inputSlot, primIndex, attrib);
            // one vertex's attributes per numScalarAttribs-sized stripe
            _mm_store_ps(&inAttribs[idx], attrib[0]);
            _mm_store_ps(&inAttribs[idx + numScalarAttribs], attrib[1]);
            _mm_store_ps(&inAttribs[idx + numScalarAttribs * 2], attrib[2]);
            idx += 4;
        }

        int numVerts;
        Clip(inVerts, inAttribs, numScalarAttribs, pOutPos, &numVerts, pOutAttribs);

        return numVerts;
    }
303
    // clip SIMD primitives
    // Assembles positions/attributes for all lanes, runs the vertical
    // clipper (ClipPrims), then per input primitive transposes the clipped
    // verts back into SIMD order and feeds them to the binner via a local PA.
    void ClipSimd(const simdscalar& vPrimMask, const simdscalar& vClipMask, PA_STATE& pa, const simdscalari& vPrimId, const simdscalari& vViewportIdx)
    {
        // input/output vertex store for clipper
        simdvertex vertices[7]; // maximum 7 verts generated per triangle

        LONG constantInterpMask = this->state.backendState.constantInterpolationMask;
        uint32_t provokingVertex = 0;
        if(pa.binTopology == TOP_TRIANGLE_FAN)
        {
            provokingVertex = this->state.frontendState.provokingVertex.triFan;
        }
        ///@todo: line topology for wireframe?

        // assemble pos
        simdvector tmpVector[NumVertsPerPrim];
        pa.Assemble(VERTEX_POSITION_SLOT, tmpVector);
        for (uint32_t i = 0; i < NumVertsPerPrim; ++i)
        {
            vertices[i].attrib[VERTEX_POSITION_SLOT] = tmpVector[i];
        }

        // assemble attribs
        const SWR_BACKEND_STATE& backendState = this->state.backendState;

        int32_t maxSlot = -1;
        for (uint32_t slot = 0; slot < backendState.numAttributes; ++slot)
        {
            // Compute absolute attrib slot in vertex array
            uint32_t mapSlot = backendState.swizzleEnable ? backendState.swizzleMap[slot].sourceAttrib : slot;
            maxSlot = std::max<int32_t>(maxSlot, mapSlot);
            uint32_t inputSlot = VERTEX_ATTRIB_START_SLOT + mapSlot;

            pa.Assemble(inputSlot, tmpVector);

            // if constant interpolation enabled for this attribute, assign the provoking
            // vertex values to all edges
            if (_bittest(&constantInterpMask, slot))
            {
                for (uint32_t i = 0; i < NumVertsPerPrim; ++i)
                {
                    vertices[i].attrib[inputSlot] = tmpVector[provokingVertex];
                }
            }
            else
            {
                for (uint32_t i = 0; i < NumVertsPerPrim; ++i)
                {
                    vertices[i].attrib[inputSlot] = tmpVector[i];
                }
            }
        }

        // assemble user clip distances if enabled
        if (this->state.rastState.clipDistanceMask & 0xf)
        {
            pa.Assemble(VERTEX_CLIPCULL_DIST_LO_SLOT, tmpVector);
            for (uint32_t i = 0; i < NumVertsPerPrim; ++i)
            {
                vertices[i].attrib[VERTEX_CLIPCULL_DIST_LO_SLOT] = tmpVector[i];
            }
        }

        if (this->state.rastState.clipDistanceMask & 0xf0)
        {
            pa.Assemble(VERTEX_CLIPCULL_DIST_HI_SLOT, tmpVector);
            for (uint32_t i = 0; i < NumVertsPerPrim; ++i)
            {
                vertices[i].attrib[VERTEX_CLIPCULL_DIST_HI_SLOT] = tmpVector[i];
            }
        }

        uint32_t numAttribs = maxSlot + 1;

        // per-lane count of vertices produced by clipping
        simdscalari vNumClippedVerts = ClipPrims((float*)&vertices[0], vPrimMask, vClipMask, numAttribs);

        // set up new PA for binning clipped primitives
        PFN_PROCESS_PRIMS pfnBinFunc = nullptr;
        PRIMITIVE_TOPOLOGY clipTopology = TOP_UNKNOWN;
        if (NumVertsPerPrim == 3)
        {
            pfnBinFunc = GetBinTrianglesFunc((pa.pDC->pState->state.rastState.conservativeRast > 0));
            clipTopology = TOP_TRIANGLE_FAN;

            // so that the binner knows to bloat wide points later
            if (pa.binTopology == TOP_POINT_LIST)
                clipTopology = TOP_POINT_LIST;
        }
        else if (NumVertsPerPrim == 2)
        {
            pfnBinFunc = BinLines;
            clipTopology = TOP_LINE_LIST;
        }
        else
        {
            SWR_ASSERT(0 && "Unexpected points in clipper.");
        }


        // reinterpret SIMD registers as scalar arrays for per-lane access
        uint32_t* pVertexCount = (uint32_t*)&vNumClippedVerts;
        uint32_t* pPrimitiveId = (uint32_t*)&vPrimId;
        uint32_t* pViewportIdx = (uint32_t*)&vViewportIdx;

        // gather offsets: one simdvertex stride per clipped vertex
        const simdscalari vOffsets = _mm256_set_epi32(
            0 * sizeof(simdvertex), // unused lane
            6 * sizeof(simdvertex),
            5 * sizeof(simdvertex),
            4 * sizeof(simdvertex),
            3 * sizeof(simdvertex),
            2 * sizeof(simdvertex),
            1 * sizeof(simdvertex),
            0 * sizeof(simdvertex));

        // only need to gather 7 verts
        // @todo dynamic mask based on actual # of verts generated per lane
        const simdscalar vMask = _mm256_set_ps(0, -1, -1, -1, -1, -1, -1, -1);

        uint32_t numClippedPrims = 0;
        for (uint32_t inputPrim = 0; inputPrim < pa.NumPrims(); ++inputPrim)
        {
            uint32_t numEmittedVerts = pVertexCount[inputPrim];
            if (numEmittedVerts < NumVertsPerPrim)
            {
                // fully clipped away (or inactive lane)
                continue;
            }
            SWR_ASSERT(numEmittedVerts <= 7, "Unexpected vertex count from clipper.");

            uint32_t numEmittedPrims = GetNumPrims(clipTopology, numEmittedVerts);
            numClippedPrims += numEmittedPrims;

            // tranpose clipper output so that each lane's vertices are in SIMD order
            // set aside space for 2 vertices, as the PA will try to read up to 16 verts
            // for triangle fan
            simdvertex transposedPrims[2];

            // transpose pos
            uint8_t* pBase = (uint8_t*)(&vertices[0].attrib[VERTEX_POSITION_SLOT]) + sizeof(float) * inputPrim;
            for (uint32_t c = 0; c < 4; ++c)
            {
                transposedPrims[0].attrib[VERTEX_POSITION_SLOT][c] = _simd_mask_i32gather_ps(_mm256_undefined_ps(), (const float*)pBase, vOffsets, vMask, 1);
                pBase += sizeof(simdscalar);
            }

            // transpose attribs
            pBase = (uint8_t*)(&vertices[0].attrib[VERTEX_ATTRIB_START_SLOT]) + sizeof(float) * inputPrim;
            for (uint32_t attrib = 0; attrib < numAttribs; ++attrib)
            {
                uint32_t attribSlot = VERTEX_ATTRIB_START_SLOT + attrib;
                for (uint32_t c = 0; c < 4; ++c)
                {
                    transposedPrims[0].attrib[attribSlot][c] = _simd_mask_i32gather_ps(_mm256_undefined_ps(), (const float*)pBase, vOffsets, vMask, 1);
                    pBase += sizeof(simdscalar);
                }
            }

            // transpose user clip distances if enabled
            if (this->state.rastState.clipDistanceMask & 0xf)
            {
                pBase = (uint8_t*)(&vertices[0].attrib[VERTEX_CLIPCULL_DIST_LO_SLOT]) + sizeof(float) * inputPrim;
                for (uint32_t c = 0; c < 4; ++c)
                {
                    transposedPrims[0].attrib[VERTEX_CLIPCULL_DIST_LO_SLOT][c] = _simd_mask_i32gather_ps(_mm256_undefined_ps(), (const float*)pBase, vOffsets, vMask, 1);
                    pBase += sizeof(simdscalar);
                }
            }

            if (this->state.rastState.clipDistanceMask & 0xf0)
            {
                pBase = (uint8_t*)(&vertices[0].attrib[VERTEX_CLIPCULL_DIST_HI_SLOT]) + sizeof(float) * inputPrim;
                for (uint32_t c = 0; c < 4; ++c)
                {
                    transposedPrims[0].attrib[VERTEX_CLIPCULL_DIST_HI_SLOT][c] = _simd_mask_i32gather_ps(_mm256_undefined_ps(), (const float*)pBase, vOffsets, vMask, 1);
                    pBase += sizeof(simdscalar);
                }
            }

            // bin the clipped fan/strip through a local primitive assembler
            PA_STATE_OPT clipPa(this->pDC, numEmittedPrims, (uint8_t*)&transposedPrims[0], numEmittedVerts, true, clipTopology);

            while (clipPa.GetNextStreamOutput())
            {
                do
                {
                    simdvector attrib[NumVertsPerPrim];
                    bool assemble = clipPa.Assemble(VERTEX_POSITION_SLOT, attrib);
                    if (assemble)
                    {
                        static const uint32_t primMaskMap[] = { 0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
                        pfnBinFunc(this->pDC, clipPa, this->workerId, attrib, primMaskMap[numEmittedPrims], _simd_set1_epi32(pPrimitiveId[inputPrim]), _simd_set1_epi32(pViewportIdx[inputPrim]));
                    }
                } while (clipPa.NextPrim());
            }
        }

        // update global pipeline stat
        UPDATE_STAT_FE(CPrimitives, numClippedPrims);
    }
500
    // execute the clipper stage
    // Culls NaN / user-culled / fully-outside primitives, then either runs
    // the SIMD clipper (guardband/near/far/negw hits) or forwards trivially
    // accepted primitives straight to the binner.
    void ExecuteStage(PA_STATE& pa, simdvector prim[], uint32_t primMask, simdscalari primId, simdscalari viewportIdx)
    {
        SWR_ASSERT(pa.pDC != nullptr);
        SWR_CONTEXT* pContext = pa.pDC->pContext;

        // set up binner based on PA state
        PFN_PROCESS_PRIMS pfnBinner;
        switch (pa.binTopology)
        {
        case TOP_POINT_LIST:
            pfnBinner = BinPoints;
            break;
        case TOP_LINE_LIST:
        case TOP_LINE_STRIP:
        case TOP_LINE_LOOP:
        case TOP_LINE_LIST_ADJ:
        case TOP_LISTSTRIP_ADJ:
            pfnBinner = BinLines;
            break;
        default:
            pfnBinner = GetBinTrianglesFunc((pa.pDC->pState->state.rastState.conservativeRast > 0));
            break;
        };


        // update clipper invocations pipeline stat
        uint32_t numInvoc = _mm_popcnt_u32(primMask);
        UPDATE_STAT_FE(CInvocations, numInvoc);

        ComputeClipCodes(prim, viewportIdx);

        // cull prims with NAN coords
        primMask &= ~ComputeNaNMask(prim);

        // user cull distance cull
        if (this->state.rastState.cullDistanceMask)
        {
            primMask &= ~ComputeUserClipCullMask(pa, prim);
        }

        // cull prims outside view frustum
        // a lane survives only if no plane has all vertices outside it
        simdscalar clipIntersection = ComputeClipCodeIntersection();
        int validMask = primMask & _simd_movemask_ps(_simd_cmpeq_ps(clipIntersection, _simd_setzero_ps()));

        // skip clipping for points
        uint32_t clipMask = 0;
        if (NumVertsPerPrim != 1)
        {
            clipMask = primMask & ComputeClipMask();
        }

        if (clipMask)
        {
            AR_BEGIN(FEGuardbandClip, pa.pDC->drawId);
            // we have to clip tris, execute the clipper, which will also
            // call the binner
            ClipSimd(vMask(primMask), vMask(clipMask), pa, primId, viewportIdx);
            AR_END(FEGuardbandClip, 1);
        }
        else if (validMask)
        {
            // update CPrimitives pipeline state
            UPDATE_STAT_FE(CPrimitives, _mm_popcnt_u32(validMask));

            // forward valid prims directly to binner
            pfnBinner(this->pDC, pa, this->workerId, prim, validMask, primId, viewportIdx);
        }
    }
570
571 private:
572 inline simdscalar ComputeInterpFactor(simdscalar boundaryCoord0, simdscalar boundaryCoord1)
573 {
574 return _simd_div_ps(boundaryCoord0, _simd_sub_ps(boundaryCoord0, boundaryCoord1));
575 }
576
577 inline simdscalari ComputeOffsets(uint32_t attrib, simdscalari vIndices, uint32_t component)
578 {
579 const uint32_t simdVertexStride = sizeof(simdvertex);
580 const uint32_t componentStride = sizeof(simdscalar);
581 const uint32_t attribStride = sizeof(simdvector);
582 const __m256i vElemOffset = _mm256_set_epi32(7 * sizeof(float), 6 * sizeof(float), 5 * sizeof(float), 4 * sizeof(float),
583 3 * sizeof(float), 2 * sizeof(float), 1 * sizeof(float), 0 * sizeof(float));
584
585 // step to the simdvertex
586 simdscalari vOffsets = _simd_mullo_epi32(vIndices, _simd_set1_epi32(simdVertexStride));
587
588 // step to the attribute and component
589 vOffsets = _simd_add_epi32(vOffsets, _simd_set1_epi32(attribStride * attrib + componentStride * component));
590
591 // step to the lane
592 vOffsets = _simd_add_epi32(vOffsets, vElemOffset);
593
594 return vOffsets;
595 }
596
597 // gathers a single component for a given attribute for each SIMD lane
598 inline simdscalar GatherComponent(const float* pBuffer, uint32_t attrib, simdscalar vMask, simdscalari vIndices, uint32_t component)
599 {
600 simdscalari vOffsets = ComputeOffsets(attrib, vIndices, component);
601 simdscalar vSrc = _mm256_undefined_ps();
602 return _simd_mask_i32gather_ps(vSrc, pBuffer, vOffsets, vMask, 1);
603 }
604
605 inline void ScatterComponent(const float* pBuffer, uint32_t attrib, simdscalar vMask, simdscalari vIndices, uint32_t component, simdscalar vSrc)
606 {
607 simdscalari vOffsets = ComputeOffsets(attrib, vIndices, component);
608
609 uint32_t* pOffsets = (uint32_t*)&vOffsets;
610 float* pSrc = (float*)&vSrc;
611 uint32_t mask = _simd_movemask_ps(vMask);
612 DWORD lane;
613 while (_BitScanForward(&lane, mask))
614 {
615 mask &= ~(1 << lane);
616 uint8_t* pBuf = (uint8_t*)pBuffer + pOffsets[lane];
617 *(float*)pBuf = pSrc[lane];
618 }
619 }
620
    // Computes the edge/plane intersection point for all active lanes and
    // writes the interpolated position, attributes, and (if enabled) user
    // clip distances at outIndex. The plane is a compile-time constant, so
    // the switch collapses to a single case.
    template<SWR_CLIPCODES ClippingPlane>
    inline void intersect(
        const simdscalar& vActiveMask,  // active lanes to operate on
        const simdscalari& s,           // index to first edge vertex v0 in pInPts.
        const simdscalari& p,           // index to second edge vertex v1 in pInPts.
        const simdvector& v1,           // vertex 0 position
        const simdvector& v2,           // vertex 1 position
        simdscalari& outIndex,          // output index.
        const float *pInVerts,          // array of all the input positions.
        uint32_t numInAttribs,          // number of attributes per vertex.
        float *pOutVerts)               // array of output positions. We'll write our new intersection point at i*4.
    {
        // compute interpolation factor
        // boundary distances: e.g. left plane uses w + x, right uses w - x
        simdscalar t;
        switch (ClippingPlane)
        {
        case FRUSTUM_LEFT:      t = ComputeInterpFactor(_simd_add_ps(v1[3], v1[0]), _simd_add_ps(v2[3], v2[0])); break;
        case FRUSTUM_RIGHT:     t = ComputeInterpFactor(_simd_sub_ps(v1[3], v1[0]), _simd_sub_ps(v2[3], v2[0])); break;
        case FRUSTUM_TOP:       t = ComputeInterpFactor(_simd_add_ps(v1[3], v1[1]), _simd_add_ps(v2[3], v2[1])); break;
        case FRUSTUM_BOTTOM:    t = ComputeInterpFactor(_simd_sub_ps(v1[3], v1[1]), _simd_sub_ps(v2[3], v2[1])); break;
        case FRUSTUM_NEAR:
            // DX Znear plane is 0, GL is -w
            if (this->state.rastState.clipHalfZ)
            {
                t = ComputeInterpFactor(v1[2], v2[2]);
            }
            else
            {
                t = ComputeInterpFactor(_simd_add_ps(v1[3], v1[2]), _simd_add_ps(v2[3], v2[2]));
            }
            break;
        case FRUSTUM_FAR:       t = ComputeInterpFactor(_simd_sub_ps(v1[3], v1[2]), _simd_sub_ps(v2[3], v2[2])); break;
        default: SWR_ASSERT(false, "invalid clipping plane: %d", ClippingPlane);
        };

        // interpolate position and store
        for (uint32_t c = 0; c < 4; ++c)
        {
            // lerp: v1 + t * (v2 - v1)
            simdscalar vOutPos = _simd_fmadd_ps(_simd_sub_ps(v2[c], v1[c]), t, v1[c]);
            ScatterComponent(pOutVerts, VERTEX_POSITION_SLOT, vActiveMask, outIndex, c, vOutPos);
        }

        // interpolate attributes and store
        for (uint32_t a = 0; a < numInAttribs; ++a)
        {
            uint32_t attribSlot = VERTEX_ATTRIB_START_SLOT + a;
            for (uint32_t c = 0; c < 4; ++c)
            {
                simdscalar vAttrib0 = GatherComponent(pInVerts, attribSlot, vActiveMask, s, c);
                simdscalar vAttrib1 = GatherComponent(pInVerts, attribSlot, vActiveMask, p, c);
                simdscalar vOutAttrib = _simd_fmadd_ps(_simd_sub_ps(vAttrib1, vAttrib0), t, vAttrib0);
                ScatterComponent(pOutVerts, attribSlot, vActiveMask, outIndex, c, vOutAttrib);
            }
        }

        // interpolate clip distance if enabled
        if (this->state.rastState.clipDistanceMask & 0xf)
        {
            uint32_t attribSlot = VERTEX_CLIPCULL_DIST_LO_SLOT;
            for (uint32_t c = 0; c < 4; ++c)
            {
                simdscalar vAttrib0 = GatherComponent(pInVerts, attribSlot, vActiveMask, s, c);
                simdscalar vAttrib1 = GatherComponent(pInVerts, attribSlot, vActiveMask, p, c);
                simdscalar vOutAttrib = _simd_fmadd_ps(_simd_sub_ps(vAttrib1, vAttrib0), t, vAttrib0);
                ScatterComponent(pOutVerts, attribSlot, vActiveMask, outIndex, c, vOutAttrib);
            }
        }

        if (this->state.rastState.clipDistanceMask & 0xf0)
        {
            uint32_t attribSlot = VERTEX_CLIPCULL_DIST_HI_SLOT;
            for (uint32_t c = 0; c < 4; ++c)
            {
                simdscalar vAttrib0 = GatherComponent(pInVerts, attribSlot, vActiveMask, s, c);
                simdscalar vAttrib1 = GatherComponent(pInVerts, attribSlot, vActiveMask, p, c);
                simdscalar vOutAttrib = _simd_fmadd_ps(_simd_sub_ps(vAttrib1, vAttrib0), t, vAttrib0);
                ScatterComponent(pOutVerts, attribSlot, vActiveMask, outIndex, c, vOutAttrib);
            }
        }
    }
701
702 template<SWR_CLIPCODES ClippingPlane>
703 inline simdscalar inside(const simdvector& v)
704 {
705 switch (ClippingPlane)
706 {
707 case FRUSTUM_LEFT: return _simd_cmpge_ps(v[0], _simd_mul_ps(v[3], _simd_set1_ps(-1.0f)));
708 case FRUSTUM_RIGHT: return _simd_cmple_ps(v[0], v[3]);
709 case FRUSTUM_TOP: return _simd_cmpge_ps(v[1], _simd_mul_ps(v[3], _simd_set1_ps(-1.0f)));
710 case FRUSTUM_BOTTOM: return _simd_cmple_ps(v[1], v[3]);
711 case FRUSTUM_NEAR: return _simd_cmpge_ps(v[2], this->state.rastState.clipHalfZ ? _simd_setzero_ps() : _simd_mul_ps(v[3], _simd_set1_ps(-1.0f)));
712 case FRUSTUM_FAR: return _simd_cmple_ps(v[2], v[3]);
713 default:
714 SWR_ASSERT(false, "invalid clipping plane: %d", ClippingPlane);
715 return _simd_setzero_ps();
716 }
717 }
718
    // Sutherland-Hodgman style clip of a polygon against one plane, run
    // across all SIMD lanes at once. Walks each lane's vertex list, emits
    // inside vertices and edge/plane intersections to pOutVerts, and
    // returns the per-lane output vertex count.
    template<SWR_CLIPCODES ClippingPlane>
    simdscalari ClipTriToPlane(const float* pInVerts, const simdscalari& vNumInPts, uint32_t numInAttribs, float* pOutVerts)
    {
        simdscalari vCurIndex = _simd_setzero_si();
        simdscalari vOutIndex = _simd_setzero_si();
        simdscalar vActiveMask = _simd_castsi_ps(_simd_cmplt_epi32(vCurIndex, vNumInPts));

        while (!_simd_testz_ps(vActiveMask, vActiveMask)) // loop until activeMask is empty
        {
            simdscalari s = vCurIndex;
            simdscalari p = _simd_add_epi32(s, _simd_set1_epi32(1));
            // wrap p back to vertex 0 on the closing edge of each lane's polygon
            simdscalari underFlowMask = _simd_cmpgt_epi32(vNumInPts, p);
            p = _simd_castps_si(_simd_blendv_ps(_simd_setzero_ps(), _simd_castsi_ps(p), _simd_castsi_ps(underFlowMask)));

            // gather position
            simdvector vInPos0, vInPos1;
            for (uint32_t c = 0; c < 4; ++c)
            {
                vInPos0[c] = GatherComponent(pInVerts, VERTEX_POSITION_SLOT, vActiveMask, s, c);
                vInPos1[c] = GatherComponent(pInVerts, VERTEX_POSITION_SLOT, vActiveMask, p, c);
            }

            // compute inside mask
            simdscalar s_in = inside<ClippingPlane>(vInPos0);
            simdscalar p_in = inside<ClippingPlane>(vInPos1);

            // compute intersection mask (s_in != p_in)
            simdscalar intersectMask = _simd_xor_ps(s_in, p_in);
            intersectMask = _simd_and_ps(intersectMask, vActiveMask);

            // store s if inside
            s_in = _simd_and_ps(s_in, vActiveMask);
            if (!_simd_testz_ps(s_in, s_in))
            {
                // store position
                for (uint32_t c = 0; c < 4; ++c)
                {
                    ScatterComponent(pOutVerts, VERTEX_POSITION_SLOT, s_in, vOutIndex, c, vInPos0[c]);
                }

                // store attribs
                for (uint32_t a = 0; a < numInAttribs; ++a)
                {
                    uint32_t attribSlot = VERTEX_ATTRIB_START_SLOT + a;
                    for (uint32_t c = 0; c < 4; ++c)
                    {
                        simdscalar vAttrib = GatherComponent(pInVerts, attribSlot, s_in, s, c);
                        ScatterComponent(pOutVerts, attribSlot, s_in, vOutIndex, c, vAttrib);
                    }
                }

                // store clip distance if enabled
                if (this->state.rastState.clipDistanceMask & 0xf)
                {
                    uint32_t attribSlot = VERTEX_CLIPCULL_DIST_LO_SLOT;
                    for (uint32_t c = 0; c < 4; ++c)
                    {
                        simdscalar vAttrib = GatherComponent(pInVerts, attribSlot, s_in, s, c);
                        ScatterComponent(pOutVerts, attribSlot, s_in, vOutIndex, c, vAttrib);
                    }
                }

                if (this->state.rastState.clipDistanceMask & 0xf0)
                {
                    uint32_t attribSlot = VERTEX_CLIPCULL_DIST_HI_SLOT;
                    for (uint32_t c = 0; c < 4; ++c)
                    {
                        simdscalar vAttrib = GatherComponent(pInVerts, attribSlot, s_in, s, c);
                        ScatterComponent(pOutVerts, attribSlot, s_in, vOutIndex, c, vAttrib);
                    }
                }

                // increment outIndex
                vOutIndex = _simd_blendv_epi32(vOutIndex, _simd_add_epi32(vOutIndex, _simd_set1_epi32(1)), s_in);
            }

            // compute and store intersection
            if (!_simd_testz_ps(intersectMask, intersectMask))
            {
                intersect<ClippingPlane>(intersectMask, s, p, vInPos0, vInPos1, vOutIndex, pInVerts, numInAttribs, pOutVerts);

                // increment outIndex for active lanes
                vOutIndex = _simd_blendv_epi32(vOutIndex, _simd_add_epi32(vOutIndex, _simd_set1_epi32(1)), intersectMask);
            }

            // increment loop index and update active mask
            vCurIndex = _simd_add_epi32(vCurIndex, _simd_set1_epi32(1));
            vActiveMask = _simd_castsi_ps(_simd_cmplt_epi32(vCurIndex, vNumInPts));
        }

        return vOutIndex;
    }
811
    // Clips a single SIMD line segment (verts 0 and 1 per lane) against one
    // plane: emits each endpoint that is inside, plus the intersection point
    // if the endpoints straddle the plane. Returns per-lane output counts.
    template<SWR_CLIPCODES ClippingPlane>
    simdscalari ClipLineToPlane(const float* pInVerts, const simdscalari& vNumInPts, uint32_t numInAttribs, float* pOutVerts)
    {
        simdscalari vCurIndex = _simd_setzero_si();
        simdscalari vOutIndex = _simd_setzero_si();
        simdscalar vActiveMask = _simd_castsi_ps(_simd_cmplt_epi32(vCurIndex, vNumInPts));

        if (!_simd_testz_ps(vActiveMask, vActiveMask))
        {
            simdscalari s = vCurIndex;
            simdscalari p = _simd_add_epi32(s, _simd_set1_epi32(1));

            // gather position
            simdvector vInPos0, vInPos1;
            for (uint32_t c = 0; c < 4; ++c)
            {
                vInPos0[c] = GatherComponent(pInVerts, VERTEX_POSITION_SLOT, vActiveMask, s, c);
                vInPos1[c] = GatherComponent(pInVerts, VERTEX_POSITION_SLOT, vActiveMask, p, c);
            }

            // compute inside mask
            simdscalar s_in = inside<ClippingPlane>(vInPos0);
            simdscalar p_in = inside<ClippingPlane>(vInPos1);

            // compute intersection mask (s_in != p_in)
            simdscalar intersectMask = _simd_xor_ps(s_in, p_in);
            intersectMask = _simd_and_ps(intersectMask, vActiveMask);

            // store s if inside
            s_in = _simd_and_ps(s_in, vActiveMask);
            if (!_simd_testz_ps(s_in, s_in))
            {
                for (uint32_t c = 0; c < 4; ++c)
                {
                    ScatterComponent(pOutVerts, VERTEX_POSITION_SLOT, s_in, vOutIndex, c, vInPos0[c]);
                }

                // interpolate attributes and store
                for (uint32_t a = 0; a < numInAttribs; ++a)
                {
                    uint32_t attribSlot = VERTEX_ATTRIB_START_SLOT + a;
                    for (uint32_t c = 0; c < 4; ++c)
                    {
                        simdscalar vAttrib = GatherComponent(pInVerts, attribSlot, s_in, s, c);
                        ScatterComponent(pOutVerts, attribSlot, s_in, vOutIndex, c, vAttrib);
                    }
                }

                // increment outIndex
                vOutIndex = _simd_blendv_epi32(vOutIndex, _simd_add_epi32(vOutIndex, _simd_set1_epi32(1)), s_in);
            }

            // compute and store intersection
            if (!_simd_testz_ps(intersectMask, intersectMask))
            {
                intersect<ClippingPlane>(intersectMask, s, p, vInPos0, vInPos1, vOutIndex, pInVerts, numInAttribs, pOutVerts);

                // increment outIndex for active lanes
                vOutIndex = _simd_blendv_epi32(vOutIndex, _simd_add_epi32(vOutIndex, _simd_set1_epi32(1)), intersectMask);
            }

            // store p if inside
            p_in = _simd_and_ps(p_in, vActiveMask);
            if (!_simd_testz_ps(p_in, p_in))
            {
                for (uint32_t c = 0; c < 4; ++c)
                {
                    ScatterComponent(pOutVerts, VERTEX_POSITION_SLOT, p_in, vOutIndex, c, vInPos1[c]);
                }

                // interpolate attributes and store
                for (uint32_t a = 0; a < numInAttribs; ++a)
                {
                    uint32_t attribSlot = VERTEX_ATTRIB_START_SLOT + a;
                    for (uint32_t c = 0; c < 4; ++c)
                    {
                        simdscalar vAttrib = GatherComponent(pInVerts, attribSlot, p_in, p, c);
                        ScatterComponent(pOutVerts, attribSlot, p_in, vOutIndex, c, vAttrib);
                    }
                }

                // increment outIndex
                vOutIndex = _simd_blendv_epi32(vOutIndex, _simd_add_epi32(vOutIndex, _simd_set1_epi32(1)), p_in);
            }
        }

        return vOutIndex;
    }
900
    //////////////////////////////////////////////////////////////////////////
    /// @brief Vertical clipper. Clips SIMD primitives at a time
    /// @param pVertices - pointer to vertices in SOA form. Clipper will read input and write results to this buffer
    /// @param vPrimMask - mask of valid input primitives, including non-clipped prims
    /// @param vClipMask - mask of primitives that actually require clipping
    /// @param numAttribs - number of valid input attribs, including position
    /// @return per-lane count of output vertices (left in pVertices)
    simdscalari ClipPrims(float* pVertices, const simdscalar& vPrimMask, const simdscalar& vClipMask, int numAttribs)
    {
        // temp storage
        float* pTempVerts = (float*)&tlsTempVertices[0];

        // zero out num input verts for non-active lanes
        simdscalari vNumInPts = _simd_set1_epi32(NumVertsPerPrim);
        vNumInPts = _simd_blendv_epi32(_simd_setzero_si(), vNumInPts, vClipMask);

        // clip prims to frustum
        // ping-pong between pVertices and pTempVerts one plane at a time;
        // 6 passes (an even count) leaves the final result in pVertices
        simdscalari vNumOutPts;
        if (NumVertsPerPrim == 3)
        {
            vNumOutPts = ClipTriToPlane<FRUSTUM_NEAR>(pVertices, vNumInPts, numAttribs, pTempVerts);
            vNumOutPts = ClipTriToPlane<FRUSTUM_FAR>(pTempVerts, vNumOutPts, numAttribs, pVertices);
            vNumOutPts = ClipTriToPlane<FRUSTUM_LEFT>(pVertices, vNumOutPts, numAttribs, pTempVerts);
            vNumOutPts = ClipTriToPlane<FRUSTUM_RIGHT>(pTempVerts, vNumOutPts, numAttribs, pVertices);
            vNumOutPts = ClipTriToPlane<FRUSTUM_BOTTOM>(pVertices, vNumOutPts, numAttribs, pTempVerts);
            vNumOutPts = ClipTriToPlane<FRUSTUM_TOP>(pTempVerts, vNumOutPts, numAttribs, pVertices);
        }
        else
        {
            SWR_ASSERT(NumVertsPerPrim == 2);
            vNumOutPts = ClipLineToPlane<FRUSTUM_NEAR>(pVertices, vNumInPts, numAttribs, pTempVerts);
            vNumOutPts = ClipLineToPlane<FRUSTUM_FAR>(pTempVerts, vNumOutPts, numAttribs, pVertices);
            vNumOutPts = ClipLineToPlane<FRUSTUM_LEFT>(pVertices, vNumOutPts, numAttribs, pTempVerts);
            vNumOutPts = ClipLineToPlane<FRUSTUM_RIGHT>(pTempVerts, vNumOutPts, numAttribs, pVertices);
            vNumOutPts = ClipLineToPlane<FRUSTUM_BOTTOM>(pVertices, vNumOutPts, numAttribs, pTempVerts);
            vNumOutPts = ClipLineToPlane<FRUSTUM_TOP>(pTempVerts, vNumOutPts, numAttribs, pVertices);
        }

        // restore num verts for non-clipped, active lanes
        simdscalar vNonClippedMask = _simd_andnot_ps(vClipMask, vPrimMask);
        vNumOutPts = _simd_blendv_epi32(vNumOutPts, _simd_set1_epi32(NumVertsPerPrim), vNonClippedMask);

        return vNumOutPts;
    }
943
    const uint32_t workerId{ 0 };           // owning worker thread id, passed through to the binner
    DRAW_CONTEXT* pDC{ nullptr };           // draw context this clipper operates on
    const API_STATE& state;                 // cached API state for pDC (set by constructor)
    simdscalar clipCodes[NumVertsPerPrim];  // per-vertex clip codes from ComputeClipCodes()
948 };
949
950
951 // pipeline stage functions
952 void ClipTriangles(DRAW_CONTEXT *pDC, PA_STATE& pa, uint32_t workerId, simdvector prims[], uint32_t primMask, simdscalari primId, simdscalari viewportIdx);
953 void ClipLines(DRAW_CONTEXT *pDC, PA_STATE& pa, uint32_t workerId, simdvector prims[], uint32_t primMask, simdscalari primId, simdscalari viewportIdx);
954 void ClipPoints(DRAW_CONTEXT *pDC, PA_STATE& pa, uint32_t workerId, simdvector prims[], uint32_t primMask, simdscalari primId, simdscalari viewportIdx);