swr: [rasterizer core] conservative rasterization frontend support
[mesa.git] / src / gallium / drivers / swr / rasterizer / core / utils.h
1 /****************************************************************************
2 * Copyright (C) 2014-2015 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * @file utils.h
24 *
25 * @brief Utilities used by SWR core.
26 *
27 ******************************************************************************/
28 #pragma once
29
30 #include <string.h>
31 #include <type_traits>
32 #include "common/os.h"
33 #include "common/simdintrin.h"
34 #include "common/swr_assert.h"
35
36 #if defined(_WIN64) || defined(__x86_64__)
37 #define _MM_INSERT_EPI64 _mm_insert_epi64
38 #define _MM_EXTRACT_EPI64 _mm_extract_epi64
39 #else
40 INLINE INT64 _MM_EXTRACT_EPI64(__m128i a, const int32_t ndx)
41 {
42 OSALIGNLINE(uint32_t) elems[4];
43 _mm_store_si128((__m128i*)elems, a);
44 if (ndx == 0)
45 {
46 uint64_t foo = elems[0];
47 foo |= (uint64_t)elems[1] << 32;
48 return foo;
49 }
50 else
51 {
52 uint64_t foo = elems[2];
53 foo |= (uint64_t)elems[3] << 32;
54 return foo;
55 }
56 }
57
58 INLINE __m128i _MM_INSERT_EPI64(__m128i a, INT64 b, const int32_t ndx)
59 {
60 OSALIGNLINE(int64_t) elems[2];
61 _mm_store_si128((__m128i*)elems, a);
62 if (ndx == 0)
63 {
64 elems[0] = b;
65 }
66 else
67 {
68 elems[1] = b;
69 }
70 __m128i out;
71 out = _mm_load_si128((const __m128i*)elems);
72 return out;
73 }
74 #endif
75
76 OSALIGNLINE(struct) BBOX
77 {
78 int top{ 0 };
79 int bottom{ 0 };
80 int left{ 0 };
81 int right{ 0 };
82
83 BBOX() {}
84 BBOX(int t, int b, int l, int r) : top(t), bottom(b), left(l), right(r) {}
85
86 bool operator==(const BBOX& rhs)
87 {
88 return (this->top == rhs.top &&
89 this->bottom == rhs.bottom &&
90 this->left == rhs.left &&
91 this->right == rhs.right);
92 }
93
94 bool operator!=(const BBOX& rhs)
95 {
96 return !(*this == rhs);
97 }
98 };
99
//////////////////////////////////////////////////////////////////////////
/// SIMD bounding box in SOA layout: each member is a SIMD integer vector
/// holding one edge coordinate per lane (KNOB_SIMD_WIDTH boxes at once).
//////////////////////////////////////////////////////////////////////////
struct simdBBox
{
    simdscalari top;
    simdscalari bottom;
    simdscalari left;
    simdscalari right;
};
107
108 INLINE
109 void vTranspose(__m128 &row0, __m128 &row1, __m128 &row2, __m128 &row3)
110 {
111 __m128i row0i = _mm_castps_si128(row0);
112 __m128i row1i = _mm_castps_si128(row1);
113 __m128i row2i = _mm_castps_si128(row2);
114 __m128i row3i = _mm_castps_si128(row3);
115
116 __m128i vTemp = row2i;
117 row2i = _mm_unpacklo_epi32(row2i, row3i);
118 vTemp = _mm_unpackhi_epi32(vTemp, row3i);
119
120 row3i = row0i;
121 row0i = _mm_unpacklo_epi32(row0i, row1i);
122 row3i = _mm_unpackhi_epi32(row3i, row1i);
123
124 row1i = row0i;
125 row0i = _mm_unpacklo_epi64(row0i, row2i);
126 row1i = _mm_unpackhi_epi64(row1i, row2i);
127
128 row2i = row3i;
129 row2i = _mm_unpacklo_epi64(row2i, vTemp);
130 row3i = _mm_unpackhi_epi64(row3i, vTemp);
131
132 row0 = _mm_castsi128_ps(row0i);
133 row1 = _mm_castsi128_ps(row1i);
134 row2 = _mm_castsi128_ps(row2i);
135 row3 = _mm_castsi128_ps(row3i);
136 }
137
138 INLINE
139 void vTranspose(__m128i &row0, __m128i &row1, __m128i &row2, __m128i &row3)
140 {
141 __m128i vTemp = row2;
142 row2 = _mm_unpacklo_epi32(row2, row3);
143 vTemp = _mm_unpackhi_epi32(vTemp, row3);
144
145 row3 = row0;
146 row0 = _mm_unpacklo_epi32(row0, row1);
147 row3 = _mm_unpackhi_epi32(row3, row1);
148
149 row1 = row0;
150 row0 = _mm_unpacklo_epi64(row0, row2);
151 row1 = _mm_unpackhi_epi64(row1, row2);
152
153 row2 = row3;
154 row2 = _mm_unpacklo_epi64(row2, vTemp);
155 row3 = _mm_unpackhi_epi64(row3, vTemp);
156 }
157
// Composite GCC version (major*10000 + minor*100 + patch),
// e.g. 40900 == GCC 4.9.0. Used below for feature availability checks.
#define GCC_VERSION (__GNUC__ * 10000 \
    + __GNUC_MINOR__ * 100 \
    + __GNUC_PATCHLEVEL__)

// Clang and GCC older than 4.9 do not provide the _mm*_undefined_*
// intrinsics; fall back to zero-initialization, which is safe but not
// quite free.
#if defined(__clang__) || (defined(__GNUC__) && (GCC_VERSION < 40900))
#define _mm_undefined_ps _mm_setzero_ps
#define _mm_undefined_si128 _mm_setzero_si128
#if KNOB_SIMD_WIDTH == 8
#define _mm256_undefined_ps _mm256_setzero_ps
#endif
#endif
169
170 #if KNOB_SIMD_WIDTH == 8
//////////////////////////////////////////////////////////////////////////
/// @brief SOA->AOS transpose of 3 channels x 8 lanes: vDst[i] receives
///        {vSrc0[i], vSrc1[i], vSrc2[i], undefined}. The 4th (w)
///        component of every output vector is garbage by design — the
///        undefined vector just fills the unpack slot cheaply.
//////////////////////////////////////////////////////////////////////////
INLINE
void vTranspose3x8(__m128 (&vDst)[8], __m256 &vSrc0, __m256 &vSrc1, __m256 &vSrc2)
{
    // Interleave x/z, and y with a don't-care w, within each 128-bit half.
    __m256 r0r2 = _mm256_unpacklo_ps(vSrc0, vSrc2);                 //x0z0x1z1 x4z4x5z5
    __m256 r1rx = _mm256_unpacklo_ps(vSrc1, _mm256_undefined_ps()); //y0w0y1w1 y4w4y5w5
    __m256 r02r1xlolo = _mm256_unpacklo_ps(r0r2, r1rx);             //x0y0z0w0 x4y4z4w4
    __m256 r02r1xlohi = _mm256_unpackhi_ps(r0r2, r1rx);             //x1y1z1w1 x5y5z5w5

    r0r2 = _mm256_unpackhi_ps(vSrc0, vSrc2);                        //x2z2x3z3 x6z6x7z7
    r1rx = _mm256_unpackhi_ps(vSrc1, _mm256_undefined_ps());        //y2w2y3w3 y6w6y7w7
    __m256 r02r1xhilo = _mm256_unpacklo_ps(r0r2, r1rx);             //x2y2z2w2 x6y6z6w6
    __m256 r02r1xhihi = _mm256_unpackhi_ps(r0r2, r1rx);             //x3y3z3w3 x7y7z7w7

    // Lanes 0-3 come out of the low 128-bit halves...
    vDst[0] = _mm256_castps256_ps128(r02r1xlolo);
    vDst[1] = _mm256_castps256_ps128(r02r1xlohi);
    vDst[2] = _mm256_castps256_ps128(r02r1xhilo);
    vDst[3] = _mm256_castps256_ps128(r02r1xhihi);

    // ...and lanes 4-7 out of the high halves.
    vDst[4] = _mm256_extractf128_ps(r02r1xlolo, 1);
    vDst[5] = _mm256_extractf128_ps(r02r1xlohi, 1);
    vDst[6] = _mm256_extractf128_ps(r02r1xhilo, 1);
    vDst[7] = _mm256_extractf128_ps(r02r1xhihi, 1);
}
194
195 INLINE
196 void vTranspose4x8(__m128 (&vDst)[8], __m256 &vSrc0, __m256 &vSrc1, __m256 &vSrc2, __m256 &vSrc3)
197 {
198 __m256 r0r2 = _mm256_unpacklo_ps(vSrc0, vSrc2); //x0z0x1z1 x4z4x5z5
199 __m256 r1rx = _mm256_unpacklo_ps(vSrc1, vSrc3); //y0w0y1w1 y4w4y5w5
200 __m256 r02r1xlolo = _mm256_unpacklo_ps(r0r2, r1rx); //x0y0z0w0 x4y4z4w4
201 __m256 r02r1xlohi = _mm256_unpackhi_ps(r0r2, r1rx); //x1y1z1w1 x5y5z5w5
202
203 r0r2 = _mm256_unpackhi_ps(vSrc0, vSrc2); //x2z2x3z3 x6z6x7z7
204 r1rx = _mm256_unpackhi_ps(vSrc1, vSrc3) ; //y2w2y3w3 y6w6yw77
205 __m256 r02r1xhilo = _mm256_unpacklo_ps(r0r2, r1rx); //x2y2z2w2 x6y6z6w6
206 __m256 r02r1xhihi = _mm256_unpackhi_ps(r0r2, r1rx); //x3y3z3w3 x7y7z7w7
207
208 vDst[0] = _mm256_castps256_ps128(r02r1xlolo);
209 vDst[1] = _mm256_castps256_ps128(r02r1xlohi);
210 vDst[2] = _mm256_castps256_ps128(r02r1xhilo);
211 vDst[3] = _mm256_castps256_ps128(r02r1xhihi);
212
213 vDst[4] = _mm256_extractf128_ps(r02r1xlolo, 1);
214 vDst[5] = _mm256_extractf128_ps(r02r1xlohi, 1);
215 vDst[6] = _mm256_extractf128_ps(r02r1xhilo, 1);
216 vDst[7] = _mm256_extractf128_ps(r02r1xhihi, 1);
217 }
218
219 INLINE
220 void vTranspose8x8(__m256 (&vDst)[8], const __m256 &vMask0, const __m256 &vMask1, const __m256 &vMask2, const __m256 &vMask3, const __m256 &vMask4, const __m256 &vMask5, const __m256 &vMask6, const __m256 &vMask7)
221 {
222 __m256 __t0 = _mm256_unpacklo_ps(vMask0, vMask1);
223 __m256 __t1 = _mm256_unpackhi_ps(vMask0, vMask1);
224 __m256 __t2 = _mm256_unpacklo_ps(vMask2, vMask3);
225 __m256 __t3 = _mm256_unpackhi_ps(vMask2, vMask3);
226 __m256 __t4 = _mm256_unpacklo_ps(vMask4, vMask5);
227 __m256 __t5 = _mm256_unpackhi_ps(vMask4, vMask5);
228 __m256 __t6 = _mm256_unpacklo_ps(vMask6, vMask7);
229 __m256 __t7 = _mm256_unpackhi_ps(vMask6, vMask7);
230 __m256 __tt0 = _mm256_shuffle_ps(__t0,__t2,_MM_SHUFFLE(1,0,1,0));
231 __m256 __tt1 = _mm256_shuffle_ps(__t0,__t2,_MM_SHUFFLE(3,2,3,2));
232 __m256 __tt2 = _mm256_shuffle_ps(__t1,__t3,_MM_SHUFFLE(1,0,1,0));
233 __m256 __tt3 = _mm256_shuffle_ps(__t1,__t3,_MM_SHUFFLE(3,2,3,2));
234 __m256 __tt4 = _mm256_shuffle_ps(__t4,__t6,_MM_SHUFFLE(1,0,1,0));
235 __m256 __tt5 = _mm256_shuffle_ps(__t4,__t6,_MM_SHUFFLE(3,2,3,2));
236 __m256 __tt6 = _mm256_shuffle_ps(__t5,__t7,_MM_SHUFFLE(1,0,1,0));
237 __m256 __tt7 = _mm256_shuffle_ps(__t5,__t7,_MM_SHUFFLE(3,2,3,2));
238 vDst[0] = _mm256_permute2f128_ps(__tt0, __tt4, 0x20);
239 vDst[1] = _mm256_permute2f128_ps(__tt1, __tt5, 0x20);
240 vDst[2] = _mm256_permute2f128_ps(__tt2, __tt6, 0x20);
241 vDst[3] = _mm256_permute2f128_ps(__tt3, __tt7, 0x20);
242 vDst[4] = _mm256_permute2f128_ps(__tt0, __tt4, 0x31);
243 vDst[5] = _mm256_permute2f128_ps(__tt1, __tt5, 0x31);
244 vDst[6] = _mm256_permute2f128_ps(__tt2, __tt6, 0x31);
245 vDst[7] = _mm256_permute2f128_ps(__tt3, __tt7, 0x31);
246 }
247
248 INLINE
249 void vTranspose8x8(__m256 (&vDst)[8], const __m256i &vMask0, const __m256i &vMask1, const __m256i &vMask2, const __m256i &vMask3, const __m256i &vMask4, const __m256i &vMask5, const __m256i &vMask6, const __m256i &vMask7)
250 {
251 vTranspose8x8(vDst, _mm256_castsi256_ps(vMask0), _mm256_castsi256_ps(vMask1), _mm256_castsi256_ps(vMask2), _mm256_castsi256_ps(vMask3),
252 _mm256_castsi256_ps(vMask4), _mm256_castsi256_ps(vMask5), _mm256_castsi256_ps(vMask6), _mm256_castsi256_ps(vMask7));
253 }
254 #endif
255
256 //////////////////////////////////////////////////////////////////////////
257 /// TranposeSingleComponent
258 //////////////////////////////////////////////////////////////////////////
259 template<uint32_t bpp>
260 struct TransposeSingleComponent
261 {
262 //////////////////////////////////////////////////////////////////////////
263 /// @brief Pass-thru for single component.
264 /// @param pSrc - source data in SOA form
265 /// @param pDst - output data in AOS form
266 INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
267 {
268 memcpy(pDst, pSrc, (bpp * KNOB_SIMD_WIDTH) / 8);
269 }
270 };
271
//////////////////////////////////////////////////////////////////////////
/// Transpose8_8_8_8
//////////////////////////////////////////////////////////////////////////
struct Transpose8_8_8_8
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 8_8_8_8 data.
    /// Input: one simd register holding 8 bytes of each component in
    /// sequence (rrrrrrrr gggggggg bbbbbbbb aaaaaaaa); output: 8
    /// interleaved 4-byte elements (32 bytes total).
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
    {
        simdscalari src = _simd_load_si((const simdscalari*)pSrc);
#if KNOB_SIMD_WIDTH == 8
#if KNOB_ARCH == KNOB_ARCH_AVX
        // AVX1 lacks 256-bit integer byte shuffles: split into 128-bit
        // halves and interleave with SSE unpacks.
        __m128i c0c1 = _mm256_castsi256_si128(src);                                           // rrrrrrrrgggggggg
        __m128i c2c3 = _mm_castps_si128(_mm256_extractf128_ps(_mm256_castsi256_ps(src), 1));  // bbbbbbbbaaaaaaaa
        __m128i c0c2 = _mm_unpacklo_epi64(c0c1, c2c3);                                        // rrrrrrrrbbbbbbbb
        __m128i c1c3 = _mm_unpackhi_epi64(c0c1, c2c3);                                        // ggggggggaaaaaaaa
        __m128i c01 = _mm_unpacklo_epi8(c0c2, c1c3);                                          // rgrgrgrgrgrgrgrg
        __m128i c23 = _mm_unpackhi_epi8(c0c2, c1c3);                                          // babababababababa
        __m128i c0123lo = _mm_unpacklo_epi16(c01, c23);                                       // rgbargbargbargba
        __m128i c0123hi = _mm_unpackhi_epi16(c01, c23);                                       // rgbargbargbargba
        _mm_store_si128((__m128i*)pDst, c0123lo);
        _mm_store_si128((__m128i*)(pDst + 16), c0123hi);
#elif KNOB_ARCH == KNOB_ARCH_AVX2
        // AVX2: in-lane byte shuffle places each half's bytes (0x80 index
        // bytes produce zeros), then the 128-bit lanes are swapped,
        // shuffled into the complementary positions, and OR-merged.
        simdscalari dst01 = _mm256_shuffle_epi8(src,
            _mm256_set_epi32(0x0f078080, 0x0e068080, 0x0d058080, 0x0c048080, 0x80800b03, 0x80800a02, 0x80800901, 0x80800800));
        simdscalari dst23 = _mm256_permute2x128_si256(src, src, 0x01);
        dst23 = _mm256_shuffle_epi8(dst23,
            _mm256_set_epi32(0x80800f07, 0x80800e06, 0x80800d05, 0x80800c04, 0x0b038080, 0x0a028080, 0x09018080, 0x08008080));
        simdscalari dst = _mm256_or_si256(dst01, dst23);
        _simd_store_si((simdscalari*)pDst, dst);
#endif
#else
#error Unsupported vector width
#endif
    }
};
310
//////////////////////////////////////////////////////////////////////////
/// Transpose8_8_8
//////////////////////////////////////////////////////////////////////////
struct Transpose8_8_8
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 8_8_8 data.
    /// Deleted: no transpose is implemented for this format, so any
    /// attempted instantiation fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
322
323 //////////////////////////////////////////////////////////////////////////
324 /// Transpose8_8
325 //////////////////////////////////////////////////////////////////////////
326 struct Transpose8_8
327 {
328 //////////////////////////////////////////////////////////////////////////
329 /// @brief Performs an SOA to AOS conversion for packed 8_8 data.
330 /// @param pSrc - source data in SOA form
331 /// @param pDst - output data in AOS form
332 INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
333 {
334 simdscalari src = _simd_load_si((const simdscalari*)pSrc);
335
336 #if KNOB_SIMD_WIDTH == 8
337 __m128i rg = _mm256_castsi256_si128(src); // rrrrrrrr gggggggg
338 __m128i g = _mm_unpackhi_epi64(rg, rg); // gggggggg gggggggg
339 rg = _mm_unpacklo_epi8(rg, g);
340 _mm_store_si128((__m128i*)pDst, rg);
341 #else
342 #error Unsupported vector width
343 #endif
344 }
345 };
346
347 //////////////////////////////////////////////////////////////////////////
348 /// Transpose32_32_32_32
349 //////////////////////////////////////////////////////////////////////////
350 struct Transpose32_32_32_32
351 {
352 //////////////////////////////////////////////////////////////////////////
353 /// @brief Performs an SOA to AOS conversion for packed 32_32_32_32 data.
354 /// @param pSrc - source data in SOA form
355 /// @param pDst - output data in AOS form
356 INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
357 {
358 #if KNOB_SIMD_WIDTH == 8
359 simdscalar src0 = _simd_load_ps((const float*)pSrc);
360 simdscalar src1 = _simd_load_ps((const float*)pSrc + 8);
361 simdscalar src2 = _simd_load_ps((const float*)pSrc + 16);
362 simdscalar src3 = _simd_load_ps((const float*)pSrc + 24);
363
364 __m128 vDst[8];
365 vTranspose4x8(vDst, src0, src1, src2, src3);
366 _mm_store_ps((float*)pDst, vDst[0]);
367 _mm_store_ps((float*)pDst+4, vDst[1]);
368 _mm_store_ps((float*)pDst+8, vDst[2]);
369 _mm_store_ps((float*)pDst+12, vDst[3]);
370 _mm_store_ps((float*)pDst+16, vDst[4]);
371 _mm_store_ps((float*)pDst+20, vDst[5]);
372 _mm_store_ps((float*)pDst+24, vDst[6]);
373 _mm_store_ps((float*)pDst+28, vDst[7]);
374 #else
375 #error Unsupported vector width
376 #endif
377 }
378 };
379
380 //////////////////////////////////////////////////////////////////////////
381 /// Transpose32_32_32
382 //////////////////////////////////////////////////////////////////////////
383 struct Transpose32_32_32
384 {
385 //////////////////////////////////////////////////////////////////////////
386 /// @brief Performs an SOA to AOS conversion for packed 32_32_32 data.
387 /// @param pSrc - source data in SOA form
388 /// @param pDst - output data in AOS form
389 INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
390 {
391 #if KNOB_SIMD_WIDTH == 8
392 simdscalar src0 = _simd_load_ps((const float*)pSrc);
393 simdscalar src1 = _simd_load_ps((const float*)pSrc + 8);
394 simdscalar src2 = _simd_load_ps((const float*)pSrc + 16);
395
396 __m128 vDst[8];
397 vTranspose3x8(vDst, src0, src1, src2);
398 _mm_store_ps((float*)pDst, vDst[0]);
399 _mm_store_ps((float*)pDst + 4, vDst[1]);
400 _mm_store_ps((float*)pDst + 8, vDst[2]);
401 _mm_store_ps((float*)pDst + 12, vDst[3]);
402 _mm_store_ps((float*)pDst + 16, vDst[4]);
403 _mm_store_ps((float*)pDst + 20, vDst[5]);
404 _mm_store_ps((float*)pDst + 24, vDst[6]);
405 _mm_store_ps((float*)pDst + 28, vDst[7]);
406 #else
407 #error Unsupported vector width
408 #endif
409 }
410 };
411
412 //////////////////////////////////////////////////////////////////////////
413 /// Transpose32_32
414 //////////////////////////////////////////////////////////////////////////
415 struct Transpose32_32
416 {
417 //////////////////////////////////////////////////////////////////////////
418 /// @brief Performs an SOA to AOS conversion for packed 32_32 data.
419 /// @param pSrc - source data in SOA form
420 /// @param pDst - output data in AOS form
421 INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
422 {
423 const float* pfSrc = (const float*)pSrc;
424 __m128 src_r0 = _mm_load_ps(pfSrc + 0);
425 __m128 src_r1 = _mm_load_ps(pfSrc + 4);
426 __m128 src_g0 = _mm_load_ps(pfSrc + 8);
427 __m128 src_g1 = _mm_load_ps(pfSrc + 12);
428
429 __m128 dst0 = _mm_unpacklo_ps(src_r0, src_g0);
430 __m128 dst1 = _mm_unpackhi_ps(src_r0, src_g0);
431 __m128 dst2 = _mm_unpacklo_ps(src_r1, src_g1);
432 __m128 dst3 = _mm_unpackhi_ps(src_r1, src_g1);
433
434 float* pfDst = (float*)pDst;
435 _mm_store_ps(pfDst + 0, dst0);
436 _mm_store_ps(pfDst + 4, dst1);
437 _mm_store_ps(pfDst + 8, dst2);
438 _mm_store_ps(pfDst + 12, dst3);
439 }
440 };
441
//////////////////////////////////////////////////////////////////////////
/// Transpose16_16_16_16
//////////////////////////////////////////////////////////////////////////
struct Transpose16_16_16_16
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 16_16_16_16 data.
    /// Input: two simd registers — the first holds 8 words of component r
    /// then 8 of g, the second 8 of b then 8 of a. Output: 8 interleaved
    /// 8-byte rgba elements.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
    {
#if KNOB_SIMD_WIDTH == 8
        simdscalari src_rg = _simd_load_si((const simdscalari*)pSrc);
        simdscalari src_ba = _simd_load_si((const simdscalari*)(pSrc + sizeof(simdscalari)));

        // Split each register into its per-component 128-bit halves.
        __m128i src_r = _mm256_extractf128_si256(src_rg, 0);
        __m128i src_g = _mm256_extractf128_si256(src_rg, 1);
        __m128i src_b = _mm256_extractf128_si256(src_ba, 0);
        __m128i src_a = _mm256_extractf128_si256(src_ba, 1);

        // Interleave 16-bit components pairwise into rg and ba streams.
        __m128i rg0 = _mm_unpacklo_epi16(src_r, src_g);
        __m128i rg1 = _mm_unpackhi_epi16(src_r, src_g);
        __m128i ba0 = _mm_unpacklo_epi16(src_b, src_a);
        __m128i ba1 = _mm_unpackhi_epi16(src_b, src_a);

        // Interleave 32-bit rg/ba pairs into complete rgba elements.
        __m128i dst0 = _mm_unpacklo_epi32(rg0, ba0);
        __m128i dst1 = _mm_unpackhi_epi32(rg0, ba0);
        __m128i dst2 = _mm_unpacklo_epi32(rg1, ba1);
        __m128i dst3 = _mm_unpackhi_epi32(rg1, ba1);

        _mm_store_si128(((__m128i*)pDst) + 0, dst0);
        _mm_store_si128(((__m128i*)pDst) + 1, dst1);
        _mm_store_si128(((__m128i*)pDst) + 2, dst2);
        _mm_store_si128(((__m128i*)pDst) + 3, dst3);
#else
#error Unsupported vector width
#endif
    }
};
481
//////////////////////////////////////////////////////////////////////////
/// Transpose16_16_16
//////////////////////////////////////////////////////////////////////////
struct Transpose16_16_16
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 16_16_16 data.
    /// Same layout as 16_16_16_16 but with only a 128-bit b channel and
    /// no a channel: the a slot is filled from an undefined register, so
    /// every 4th word of the output is garbage by design.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
    {
#if KNOB_SIMD_WIDTH == 8
        simdscalari src_rg = _simd_load_si((const simdscalari*)pSrc);

        __m128i src_r = _mm256_extractf128_si256(src_rg, 0);
        __m128i src_g = _mm256_extractf128_si256(src_rg, 1);
        __m128i src_b = _mm_load_si128((const __m128i*)(pSrc + sizeof(simdscalari)));
        __m128i src_a = _mm_undefined_si128(); // don't-care filler for the 4th lane

        // Interleave 16-bit components pairwise into rg and ba streams.
        __m128i rg0 = _mm_unpacklo_epi16(src_r, src_g);
        __m128i rg1 = _mm_unpackhi_epi16(src_r, src_g);
        __m128i ba0 = _mm_unpacklo_epi16(src_b, src_a);
        __m128i ba1 = _mm_unpackhi_epi16(src_b, src_a);

        // Interleave 32-bit rg/ba pairs into rgbx elements.
        __m128i dst0 = _mm_unpacklo_epi32(rg0, ba0);
        __m128i dst1 = _mm_unpackhi_epi32(rg0, ba0);
        __m128i dst2 = _mm_unpacklo_epi32(rg1, ba1);
        __m128i dst3 = _mm_unpackhi_epi32(rg1, ba1);

        _mm_store_si128(((__m128i*)pDst) + 0, dst0);
        _mm_store_si128(((__m128i*)pDst) + 1, dst1);
        _mm_store_si128(((__m128i*)pDst) + 2, dst2);
        _mm_store_si128(((__m128i*)pDst) + 3, dst3);
#else
#error Unsupported vector width
#endif
    }
};
520
521 //////////////////////////////////////////////////////////////////////////
522 /// Transpose16_16
523 //////////////////////////////////////////////////////////////////////////
524 struct Transpose16_16
525 {
526 //////////////////////////////////////////////////////////////////////////
527 /// @brief Performs an SOA to AOS conversion for packed 16_16 data.
528 /// @param pSrc - source data in SOA form
529 /// @param pDst - output data in AOS form
530 INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst)
531 {
532 simdscalar src = _simd_load_ps((const float*)pSrc);
533
534 #if KNOB_SIMD_WIDTH == 8
535 __m128 comp0 = _mm256_castps256_ps128(src);
536 __m128 comp1 = _mm256_extractf128_ps(src, 1);
537
538 __m128i comp0i = _mm_castps_si128(comp0);
539 __m128i comp1i = _mm_castps_si128(comp1);
540
541 __m128i resLo = _mm_unpacklo_epi16(comp0i, comp1i);
542 __m128i resHi = _mm_unpackhi_epi16(comp0i, comp1i);
543
544 _mm_store_si128((__m128i*)pDst, resLo);
545 _mm_store_si128((__m128i*)pDst + 1, resHi);
546 #else
547 #error Unsupported vector width
548 #endif
549 }
550 };
551
//////////////////////////////////////////////////////////////////////////
/// Transpose24_8
//////////////////////////////////////////////////////////////////////////
struct Transpose24_8
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 24_8 data.
    /// Deleted: not implemented for this format; use fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
563
//////////////////////////////////////////////////////////////////////////
/// Transpose32_8_24
//////////////////////////////////////////////////////////////////////////
struct Transpose32_8_24
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 32_8_24 data.
    /// Deleted: not implemented for this format; use fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
575
576
577
//////////////////////////////////////////////////////////////////////////
/// Transpose4_4_4_4
//////////////////////////////////////////////////////////////////////////
struct Transpose4_4_4_4
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 4_4_4_4 data.
    /// Deleted: not implemented for this format; use fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
589
//////////////////////////////////////////////////////////////////////////
/// Transpose5_6_5
//////////////////////////////////////////////////////////////////////////
struct Transpose5_6_5
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 5_6_5 data.
    /// Deleted: not implemented for this format; use fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
601
//////////////////////////////////////////////////////////////////////////
/// Transpose9_9_9_5
//////////////////////////////////////////////////////////////////////////
struct Transpose9_9_9_5
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 9_9_9_5 data.
    /// Deleted: not implemented for this format; use fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
613
//////////////////////////////////////////////////////////////////////////
/// Transpose5_5_5_1
//////////////////////////////////////////////////////////////////////////
struct Transpose5_5_5_1
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 5_5_5_1 data.
    /// Deleted: not implemented for this format; use fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
625
//////////////////////////////////////////////////////////////////////////
/// Transpose10_10_10_2
//////////////////////////////////////////////////////////////////////////
struct Transpose10_10_10_2
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 10_10_10_2 data.
    /// Deleted: not implemented for this format; use fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
637
//////////////////////////////////////////////////////////////////////////
/// Transpose11_11_10
//////////////////////////////////////////////////////////////////////////
struct Transpose11_11_10
{
    //////////////////////////////////////////////////////////////////////////
    /// @brief Performs an SOA to AOS conversion for packed 11_11_10 data.
    /// Deleted: not implemented for this format; use fails at compile time.
    /// @param pSrc - source data in SOA form
    /// @param pDst - output data in AOS form
    static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
649
//////////////////////////////////////////////////////////////////////////
/// Compile-time loop unroller: UnrollerL<Begin, End, Step>::step(f)
/// expands to f(Begin), f(Begin+Step), ... for indices below End.
/// Note: Begin + k*Step must hit End exactly, otherwise the recursion
/// never reaches the terminating specialization and instantiation fails.
//////////////////////////////////////////////////////////////////////////
template<int Begin, int End, int Step = 1>
struct UnrollerL {
    template<typename Lambda>
    INLINE static void step(Lambda& func) {
        func(Begin);
        UnrollerL<Begin + Step, End, Step>::step(func);
    }
};

// Terminating specialization: recursion stops once Begin == End.
template<int End, int Step>
struct UnrollerL<End, End, Step> {
    template<typename Lambda>
    static void step(Lambda& func) {
    }
};
666
667 // general CRC compute
668 INLINE
669 uint32_t ComputeCRC(uint32_t crc, const void *pData, uint32_t size)
670 {
671 #if defined(_WIN64) || defined(__x86_64__)
672 uint32_t sizeInQwords = size / sizeof(uint64_t);
673 uint32_t sizeRemainderBytes = size % sizeof(uint64_t);
674 uint64_t* pDataWords = (uint64_t*)pData;
675 for (uint32_t i = 0; i < sizeInQwords; ++i)
676 {
677 crc = (uint32_t)_mm_crc32_u64(crc, *pDataWords++);
678 }
679 #else
680 uint32_t sizeInDwords = size / sizeof(uint32_t);
681 uint32_t sizeRemainderBytes = size % sizeof(uint32_t);
682 uint32_t* pDataWords = (uint32_t*)pData;
683 for (uint32_t i = 0; i < sizeInDwords; ++i)
684 {
685 crc = _mm_crc32_u32(crc, *pDataWords++);
686 }
687 #endif
688
689 uint8_t* pRemainderBytes = (uint8_t*)pDataWords;
690 for (uint32_t i = 0; i < sizeRemainderBytes; ++i)
691 {
692 crc = _mm_crc32_u8(crc, *pRemainderBytes++);
693 }
694
695 return crc;
696 }
697
698 //////////////////////////////////////////////////////////////////////////
699 /// Add byte offset to any-type pointer
700 //////////////////////////////////////////////////////////////////////////
701 template <typename T>
702 INLINE
703 static T* PtrAdd(T* p, intptr_t offset)
704 {
705 intptr_t intp = reinterpret_cast<intptr_t>(p);
706 return reinterpret_cast<T*>(intp + offset);
707 }
708
709 //////////////////////////////////////////////////////////////////////////
710 /// Is a power-of-2?
711 //////////////////////////////////////////////////////////////////////////
712 template <typename T>
713 INLINE
714 static bool IsPow2(T value)
715 {
716 return value == (value & (0 - value));
717 }
718
719 //////////////////////////////////////////////////////////////////////////
720 /// Align down to specified alignment
721 /// Note: IsPow2(alignment) MUST be true
722 //////////////////////////////////////////////////////////////////////////
723 template <typename T1, typename T2>
724 INLINE
725 static T1 AlignDownPow2(T1 value, T2 alignment)
726 {
727 SWR_ASSERT(IsPow2(alignment));
728 return value & ~T1(alignment - 1);
729 }
730
731 //////////////////////////////////////////////////////////////////////////
732 /// Align up to specified alignment
733 /// Note: IsPow2(alignment) MUST be true
734 //////////////////////////////////////////////////////////////////////////
735 template <typename T1, typename T2>
736 INLINE
737 static T1 AlignUpPow2(T1 value, T2 alignment)
738 {
739 return AlignDownPow2(value + T1(alignment - 1), alignment);
740 }
741
742 //////////////////////////////////////////////////////////////////////////
743 /// Align up ptr to specified alignment
744 /// Note: IsPow2(alignment) MUST be true
745 //////////////////////////////////////////////////////////////////////////
746 template <typename T1, typename T2>
747 INLINE
748 static T1* AlignUpPow2(T1* value, T2 alignment)
749 {
750 return reinterpret_cast<T1*>(
751 AlignDownPow2(reinterpret_cast<uintptr_t>(value) + uintptr_t(alignment - 1), alignment));
752 }
753
754 //////////////////////////////////////////////////////////////////////////
755 /// Align down to specified alignment
756 //////////////////////////////////////////////////////////////////////////
757 template <typename T1, typename T2>
758 INLINE
759 static T1 AlignDown(T1 value, T2 alignment)
760 {
761 if (IsPow2(alignment)) { return AlignDownPow2(value, alignment); }
762 return value - T1(value % alignment);
763 }
764
765 //////////////////////////////////////////////////////////////////////////
766 /// Align down to specified alignment
767 //////////////////////////////////////////////////////////////////////////
768 template <typename T1, typename T2>
769 INLINE
770 static T1* AlignDown(T1* value, T2 alignment)
771 {
772 return (T1*)AlignDown(uintptr_t(value), alignment);
773 }
774
775 //////////////////////////////////////////////////////////////////////////
776 /// Align up to specified alignment
777 /// Note: IsPow2(alignment) MUST be true
778 //////////////////////////////////////////////////////////////////////////
779 template <typename T1, typename T2>
780 INLINE
781 static T1 AlignUp(T1 value, T2 alignment)
782 {
783 return AlignDown(value + T1(alignment - 1), alignment);
784 }
785
786 //////////////////////////////////////////////////////////////////////////
787 /// Align up to specified alignment
788 /// Note: IsPow2(alignment) MUST be true
789 //////////////////////////////////////////////////////////////////////////
790 template <typename T1, typename T2>
791 INLINE
792 static T1* AlignUp(T1* value, T2 alignment)
793 {
794 return AlignDown(PtrAdd(value, alignment - 1), alignment);
795 }
796
//////////////////////////////////////////////////////////////////////////
/// Helper structure used to access an array of elements that don't
/// correspond to a typical word size. Elements are packed into size_t
/// words, BitsPerElementT bits apiece; read-only access via operator[].
//////////////////////////////////////////////////////////////////////////
template<typename T, size_t BitsPerElementT, size_t ArrayLenT>
class BitsArray
{
private:
    static const size_t BITS_PER_WORD = sizeof(size_t) * 8;
    static const size_t ELEMENTS_PER_WORD = BITS_PER_WORD / BitsPerElementT;
    // Number of backing words, rounded up to cover ArrayLenT elements.
    static const size_t NUM_WORDS = (ArrayLenT + ELEMENTS_PER_WORD - 1) / ELEMENTS_PER_WORD;
    static const size_t ELEMENT_MASK = (size_t(1) << BitsPerElementT) - 1;

    // Elements may not straddle word boundaries: the element size must
    // divide the word size exactly.
    static_assert(ELEMENTS_PER_WORD * BitsPerElementT == BITS_PER_WORD,
        "Element size must an integral fraction of pointer size");

    size_t m_words[NUM_WORDS] = {};  // zero-initialized backing storage

public:

    /// @brief Extracts element elementIndex (no bounds checking).
    T operator[] (size_t elementIndex) const
    {
        size_t word = m_words[elementIndex / ELEMENTS_PER_WORD];
        word >>= ((elementIndex % ELEMENTS_PER_WORD) * BitsPerElementT);
        return T(word & ELEMENT_MASK);
    }
};
824
// Recursive template used to auto-nest conditionals. Converts dynamic
// boolean / small-integer function arguments to static template arguments,
// ultimately selecting a fully-specialized TermT::GetFunc<...>().
// Each bool maps to std::true_type/std::false_type; each uint32_t (which
// must be in [0, 4]) maps to std::integral_constant<uint32_t, N>.
template <typename TermT, typename... ArgsB>
struct TemplateArgUnroller
{
    // Last Arg Terminator
    static typename TermT::FuncType GetFunc(bool bArg)
    {
        if (bArg)
        {
            return TermT::template GetFunc<ArgsB..., std::true_type>();
        }

        return TermT::template GetFunc<ArgsB..., std::false_type>();
    }

    // Recursively parse args
    template <typename... TArgsT>
    static typename TermT::FuncType GetFunc(bool bArg, TArgsT... remainingArgs)
    {
        if (bArg)
        {
            return TemplateArgUnroller<TermT, ArgsB..., std::true_type>::GetFunc(remainingArgs...);
        }

        return TemplateArgUnroller<TermT, ArgsB..., std::false_type>::GetFunc(remainingArgs...);
    }

    // Last Arg Terminator
    // Only values 0-4 are supported; anything else is a caller bug
    // (SWR_ASSUME(false) makes the default branch undefined in release).
    template <typename... TArgsT>
    static typename TermT::FuncType GetFunc(uint32_t iArg)
    {
        switch(iArg)
        {
        case 0: return TermT::template GetFunc<ArgsB..., std::integral_constant<uint32_t, 0>>();
        case 1: return TermT::template GetFunc<ArgsB..., std::integral_constant<uint32_t, 1>>();
        case 2: return TermT::template GetFunc<ArgsB..., std::integral_constant<uint32_t, 2>>();
        case 3: return TermT::template GetFunc<ArgsB..., std::integral_constant<uint32_t, 3>>();
        case 4: return TermT::template GetFunc<ArgsB..., std::integral_constant<uint32_t, 4>>();
        default: SWR_ASSUME(false); return nullptr;
        }
    }

    // Recursively parse args
    template <typename... TArgsT>
    static typename TermT::FuncType GetFunc(uint32_t iArg, TArgsT... remainingArgs)
    {
        switch(iArg)
        {
        case 0: return TemplateArgUnroller<TermT, ArgsB..., std::integral_constant<uint32_t, 0>>::GetFunc(remainingArgs...);
        case 1: return TemplateArgUnroller<TermT, ArgsB..., std::integral_constant<uint32_t, 1>>::GetFunc(remainingArgs...);
        case 2: return TemplateArgUnroller<TermT, ArgsB..., std::integral_constant<uint32_t, 2>>::GetFunc(remainingArgs...);
        case 3: return TemplateArgUnroller<TermT, ArgsB..., std::integral_constant<uint32_t, 3>>::GetFunc(remainingArgs...);
        case 4: return TemplateArgUnroller<TermT, ArgsB..., std::integral_constant<uint32_t, 4>>::GetFunc(remainingArgs...);
        default: SWR_ASSUME(false); return nullptr;
        }
    }
};
883
884