// swr/rast: simdlib cleanup, clipper stack space fixes
// [mesa.git] src/gallium/drivers/swr/rasterizer/common/simdlib_256_avx512.inl
1 /****************************************************************************
2 * Copyright (C) 2017 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 ****************************************************************************/
23 #if !defined(__SIMD_LIB_AVX512_HPP__)
24 #error Do not include this file directly, use "simdlib.hpp" instead.
25 #endif
26
27 //============================================================================
28 // SIMD256 AVX (512) implementation
29 //
// Since this implementation inherits from the AVX (2) implementation,
// the only operations defined below are the ones that replace the AVX (2)
// operations.
32 // These use native AVX512 instructions with masking to enable a larger
33 // register set.
34 //============================================================================
35
36 private:
37 static SIMDINLINE __m512 __conv(Float r)
38 {
39 return _mm512_castps256_ps512(r.v);
40 }
41 static SIMDINLINE __m512d __conv(Double r)
42 {
43 return _mm512_castpd256_pd512(r.v);
44 }
45 static SIMDINLINE __m512i __conv(Integer r)
46 {
47 return _mm512_castsi256_si512(r.v);
48 }
49 static SIMDINLINE Float __conv(__m512 r)
50 {
51 return _mm512_castps512_ps256(r);
52 }
53 static SIMDINLINE Double __conv(__m512d r)
54 {
55 return _mm512_castpd512_pd256(r);
56 }
57 static SIMDINLINE Integer __conv(__m512i r)
58 {
59 return _mm512_castsi512_si256(r);
60 }
61
62 public:
// Helper macros: implement a 256-bit operation via the corresponding AVX512
// intrinsic, using zero-masking restricted to the low lanes of the 512-bit
// register (0xff = 8 x 32-bit lanes, 0xf = 4 x 64-bit lanes).
//   name     - method name to emit
//   intrin   - AVX512 intrinsic suffix (pasted after _mm512_maskz_)
//   lanemask - __mmask16 selecting the lanes that participate
#define SIMD_WRAPPER_1_(name, intrin, lanemask)                     \
    static SIMDINLINE Float SIMDCALL name(Float a)                  \
    {                                                               \
        return __conv(_mm512_maskz_##intrin((lanemask), __conv(a))); \
    }
#define SIMD_WRAPPER_1(name) SIMD_WRAPPER_1_(name, name, __mmask16(0xff))

#define SIMD_WRAPPER_1I_(name, intrin, lanemask)                          \
    template <int ImmT>                                                   \
    static SIMDINLINE Float SIMDCALL name(Float a)                        \
    {                                                                     \
        return __conv(_mm512_maskz_##intrin((lanemask), __conv(a), ImmT)); \
    }
#define SIMD_WRAPPER_1I(name) SIMD_WRAPPER_1I_(name, name, __mmask16(0xff))

#define SIMD_WRAPPER_2_(name, intrin, lanemask)                                 \
    static SIMDINLINE Float SIMDCALL name(Float a, Float b)                     \
    {                                                                           \
        return __conv(_mm512_maskz_##intrin((lanemask), __conv(a), __conv(b))); \
    }
#define SIMD_WRAPPER_2(name) SIMD_WRAPPER_2_(name, name, __mmask16(0xff))

#define SIMD_WRAPPER_2I(name)                                              \
    template <int ImmT>                                                    \
    static SIMDINLINE Float SIMDCALL name(Float a, Float b)                \
    {                                                                      \
        return __conv(_mm512_maskz_##name(0xff, __conv(a), __conv(b), ImmT)); \
    }

#define SIMD_WRAPPER_3_(name, intrin, lanemask)                                            \
    static SIMDINLINE Float SIMDCALL name(Float a, Float b, Float c)                       \
    {                                                                                      \
        return __conv(_mm512_maskz_##intrin((lanemask), __conv(a), __conv(b), __conv(c))); \
    }
#define SIMD_WRAPPER_3(name) SIMD_WRAPPER_3_(name, name, __mmask16(0xff))

#define SIMD_DWRAPPER_2I(name)                                               \
    template <int ImmT>                                                      \
    static SIMDINLINE Double SIMDCALL name(Double a, Double b)               \
    {                                                                        \
        return __conv(_mm512_maskz_##name(0xf, __conv(a), __conv(b), ImmT)); \
    }

#define SIMD_IWRAPPER_1_(name, intrin, lanemask)                     \
    static SIMDINLINE Integer SIMDCALL name(Integer a)               \
    {                                                                \
        return __conv(_mm512_maskz_##intrin((lanemask), __conv(a))); \
    }
#define SIMD_IWRAPPER_1_32(name) SIMD_IWRAPPER_1_(name, name, __mmask16(0xff))

#define SIMD_IWRAPPER_1I_(name, intrin, lanemask)                          \
    template <int ImmT>                                                    \
    static SIMDINLINE Integer SIMDCALL name(Integer a)                     \
    {                                                                      \
        return __conv(_mm512_maskz_##intrin((lanemask), __conv(a), ImmT)); \
    }
#define SIMD_IWRAPPER_1I_32(name) SIMD_IWRAPPER_1I_(name, name, __mmask16(0xff))

#define SIMD_IWRAPPER_2_(name, intrin, lanemask)                                \
    static SIMDINLINE Integer SIMDCALL name(Integer a, Integer b)               \
    {                                                                           \
        return __conv(_mm512_maskz_##intrin((lanemask), __conv(a), __conv(b))); \
    }
#define SIMD_IWRAPPER_2_32(name) SIMD_IWRAPPER_2_(name, name, __mmask16(0xff))

#define SIMD_IWRAPPER_2I(name)                                                \
    template <int ImmT>                                                       \
    static SIMDINLINE Integer SIMDCALL name(Integer a, Integer b)             \
    {                                                                         \
        return __conv(_mm512_maskz_##name(0xff, __conv(a), __conv(b), ImmT)); \
    }
134
135 //-----------------------------------------------------------------------
136 // Single precision floating point arithmetic operations
137 //-----------------------------------------------------------------------
138 SIMD_WRAPPER_2(add_ps); // return a + b
139 SIMD_WRAPPER_2(div_ps); // return a / b
140 SIMD_WRAPPER_3(fmadd_ps); // return (a * b) + c
141 SIMD_WRAPPER_3(fmsub_ps); // return (a * b) - c
142 SIMD_WRAPPER_2(max_ps); // return (a > b) ? a : b
143 SIMD_WRAPPER_2(min_ps); // return (a < b) ? a : b
144 SIMD_WRAPPER_2(mul_ps); // return a * b
145 SIMD_WRAPPER_1_(rcp_ps, rcp14_ps, __mmask16(0xff)); // return 1.0f / a
146 SIMD_WRAPPER_1_(rsqrt_ps, rsqrt14_ps, __mmask16(0xff)); // return 1.0f / sqrt(a)
147 SIMD_WRAPPER_2(sub_ps); // return a - b
148
149 //-----------------------------------------------------------------------
150 // Integer (various width) arithmetic operations
151 //-----------------------------------------------------------------------
152 SIMD_IWRAPPER_1_32(abs_epi32); // return absolute_value(a) (int32)
153 SIMD_IWRAPPER_2_32(add_epi32); // return a + b (int32)
154 SIMD_IWRAPPER_2_32(max_epi32); // return (a > b) ? a : b (int32)
155 SIMD_IWRAPPER_2_32(max_epu32); // return (a > b) ? a : b (uint32)
156 SIMD_IWRAPPER_2_32(min_epi32); // return (a < b) ? a : b (int32)
157 SIMD_IWRAPPER_2_32(min_epu32); // return (a < b) ? a : b (uint32)
158 SIMD_IWRAPPER_2_32(mul_epi32); // return a * b (int32)
159
160 // SIMD_IWRAPPER_2_8(add_epi8); // return a + b (int8)
161 // SIMD_IWRAPPER_2_8(adds_epu8); // return ((a + b) > 0xff) ? 0xff : (a + b) (uint8)
162
163 // return (a * b) & 0xFFFFFFFF
164 //
165 // Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers,
166 // and store the low 32 bits of the intermediate integers in dst.
167 SIMD_IWRAPPER_2_32(mullo_epi32);
168 SIMD_IWRAPPER_2_32(sub_epi32); // return a - b (int32)
169
170 // SIMD_IWRAPPER_2_64(sub_epi64); // return a - b (int64)
171 // SIMD_IWRAPPER_2_8(subs_epu8); // return (b > a) ? 0 : (a - b) (uint8)
172
173 //-----------------------------------------------------------------------
174 // Logical operations
175 //-----------------------------------------------------------------------
176 SIMD_IWRAPPER_2_(and_si, and_epi32, __mmask16(0xff)); // return a & b (int)
177 SIMD_IWRAPPER_2_(andnot_si, andnot_epi32, __mmask16(0xff)); // return (~a) & b (int)
178 SIMD_IWRAPPER_2_(or_si, or_epi32, __mmask16(0xff)); // return a | b (int)
179 SIMD_IWRAPPER_2_(xor_si, xor_epi32, __mmask16(0xff)); // return a ^ b (int)
180
181 //-----------------------------------------------------------------------
182 // Shift operations
183 //-----------------------------------------------------------------------
184 SIMD_IWRAPPER_1I_32(slli_epi32); // return a << ImmT
185 SIMD_IWRAPPER_2_32(sllv_epi32); // return a << b (uint32)
186 SIMD_IWRAPPER_1I_32(srai_epi32); // return a >> ImmT (int32)
187 SIMD_IWRAPPER_1I_32(srli_epi32); // return a >> ImmT (uint32)
188 SIMD_IWRAPPER_2_32(srlv_epi32); // return a >> b (uint32)
189
190 // use AVX2 version
191 // SIMD_IWRAPPER_1I_(srli_si, srli_si256); // return a >> (ImmT*8) (uint)
192
193 //-----------------------------------------------------------------------
194 // Conversion operations (Use AVX2 versions)
195 //-----------------------------------------------------------------------
196 // SIMD_IWRAPPER_1L(cvtepu8_epi16, 0xffff); // return (int16)a (uint8 --> int16)
197 // SIMD_IWRAPPER_1L(cvtepu8_epi32, 0xff); // return (int32)a (uint8 --> int32)
198 // SIMD_IWRAPPER_1L(cvtepu16_epi32, 0xff); // return (int32)a (uint16 --> int32)
199 // SIMD_IWRAPPER_1L(cvtepu16_epi64, 0xf); // return (int64)a (uint16 --> int64)
200 // SIMD_IWRAPPER_1L(cvtepu32_epi64, 0xf); // return (int64)a (uint32 --> int64)
201
//-----------------------------------------------------------------------
// Comparison operations (use AVX2 versions)
//-----------------------------------------------------------------------
205 // SIMD_IWRAPPER_2_CMP(cmpeq_epi8); // return a == b (int8)
206 // SIMD_IWRAPPER_2_CMP(cmpeq_epi16); // return a == b (int16)
207 // SIMD_IWRAPPER_2_CMP(cmpeq_epi32); // return a == b (int32)
208 // SIMD_IWRAPPER_2_CMP(cmpeq_epi64); // return a == b (int64)
209 // SIMD_IWRAPPER_2_CMP(cmpgt_epi8,); // return a > b (int8)
210 // SIMD_IWRAPPER_2_CMP(cmpgt_epi16); // return a > b (int16)
211 // SIMD_IWRAPPER_2_CMP(cmpgt_epi32); // return a > b (int32)
212 // SIMD_IWRAPPER_2_CMP(cmpgt_epi64); // return a > b (int64)
213 //
214 // static SIMDINLINE Integer SIMDCALL cmplt_epi32(Integer a, Integer b) // return a < b (int32)
215 //{
216 // return cmpgt_epi32(b, a);
217 //}
218
219 //-----------------------------------------------------------------------
220 // Blend / shuffle / permute operations
221 //-----------------------------------------------------------------------
// SIMD_IWRAPPER_2_8(packs_epi16);   // int16  --> int8    See documentation for _mm256_packs_epi16 and _mm512_packs_epi16
// SIMD_IWRAPPER_2_16(packs_epi32);  // int32  --> int16   See documentation for _mm256_packs_epi32 and _mm512_packs_epi32
// SIMD_IWRAPPER_2_8(packus_epi16);  // uint16 --> uint8   See documentation for _mm256_packus_epi16 and _mm512_packus_epi16
// SIMD_IWRAPPER_2_16(packus_epi32); // uint32 --> uint16  See documentation for _mm256_packus_epi32 and _mm512_packus_epi32
228
229 // SIMD_IWRAPPER_2_(permute_epi32, permutevar8x32_epi32);
230
231 // static SIMDINLINE Float SIMDCALL permute_ps(Float a, Integer swiz) // return a[swiz[i]] for
232 // each 32-bit lane i (float)
233 //{
234 // return _mm256_permutevar8x32_ps(a, swiz);
235 //}
236
237 SIMD_IWRAPPER_1I_32(shuffle_epi32);
238 // template<int ImmT>
239 // static SIMDINLINE Integer SIMDCALL shuffle_epi64(Integer a, Integer b)
240 //{
241 // return castpd_si(shuffle_pd<ImmT>(castsi_pd(a), castsi_pd(b)));
242 //}
243 // SIMD_IWRAPPER_2(shuffle_epi8);
244 SIMD_IWRAPPER_2_32(unpackhi_epi32);
245 SIMD_IWRAPPER_2_32(unpacklo_epi32);
246
247 // SIMD_IWRAPPER_2_16(unpackhi_epi16);
248 // SIMD_IWRAPPER_2_64(unpackhi_epi64);
249 // SIMD_IWRAPPER_2_8(unpackhi_epi8);
250 // SIMD_IWRAPPER_2_16(unpacklo_epi16);
251 // SIMD_IWRAPPER_2_64(unpacklo_epi64);
252 // SIMD_IWRAPPER_2_8(unpacklo_epi8);
253
254 //-----------------------------------------------------------------------
255 // Load / store operations
256 //-----------------------------------------------------------------------
257 static SIMDINLINE Float SIMDCALL
258 load_ps(float const* p) // return *p (loads SIMD width elements from memory)
259 {
260 return __conv(_mm512_maskz_loadu_ps(__mmask16(0xff), p));
261 }
262
263 static SIMDINLINE Integer SIMDCALL load_si(Integer const* p) // return *p
264 {
265 return __conv(_mm512_maskz_loadu_epi32(__mmask16(0xff), p));
266 }
267
268 static SIMDINLINE Float SIMDCALL
269 loadu_ps(float const* p) // return *p (same as load_ps but allows for unaligned mem)
270 {
271 return __conv(_mm512_maskz_loadu_ps(__mmask16(0xff), p));
272 }
273
274 static SIMDINLINE Integer SIMDCALL
275 loadu_si(Integer const* p) // return *p (same as load_si but allows for unaligned mem)
276 {
277 return __conv(_mm512_maskz_loadu_epi32(__mmask16(0xff), p));
278 }
279
280 template <ScaleFactor ScaleT = ScaleFactor::SF_1>
281 static SIMDINLINE Float SIMDCALL
282 i32gather_ps(float const* p, Integer idx) // return *(float*)(((int8*)p) + (idx * ScaleT))
283 {
284 return __conv(_mm512_mask_i32gather_ps(
285 _mm512_setzero_ps(), __mmask16(0xff), __conv(idx), p, static_cast<int>(ScaleT)));
286 }
287
288 // for each element: (mask & (1 << 31)) ? (i32gather_ps<ScaleT>(p, idx), mask = 0) : old
289 template <ScaleFactor ScaleT = ScaleFactor::SF_1>
290 static SIMDINLINE Float SIMDCALL
291 mask_i32gather_ps(Float old, float const* p, Integer idx, Float mask)
292 {
293 __mmask16 m = 0xff;
294 m = _mm512_mask_test_epi32_mask(
295 m, _mm512_castps_si512(__conv(mask)), _mm512_set1_epi32(0x80000000));
296 return __conv(
297 _mm512_mask_i32gather_ps(__conv(old), m, __conv(idx), p, static_cast<int>(ScaleT)));
298 }
299
300 // static SIMDINLINE uint32_t SIMDCALL movemask_epi8(Integer a)
301 // {
302 // __mmask64 m = 0xffffffffull;
303 // return static_cast<uint32_t>(
304 // _mm512_mask_test_epi8_mask(m, __conv(a), _mm512_set1_epi8(0x80)));
305 // }
306
307 static SIMDINLINE void SIMDCALL maskstore_ps(float* p, Integer mask, Float src)
308 {
309 __mmask16 m = 0xff;
310 m = _mm512_mask_test_epi32_mask(m, __conv(mask), _mm512_set1_epi32(0x80000000));
311 _mm512_mask_storeu_ps(p, m, __conv(src));
312 }
313
314 static SIMDINLINE void SIMDCALL
315 store_ps(float* p, Float a) // *p = a (stores all elements contiguously in memory)
316 {
317 _mm512_mask_storeu_ps(p, __mmask16(0xff), __conv(a));
318 }
319
320 static SIMDINLINE void SIMDCALL store_si(Integer* p, Integer a) // *p = a
321 {
322 _mm512_mask_storeu_epi32(p, __mmask16(0xff), __conv(a));
323 }
324
325 static SIMDINLINE Float SIMDCALL vmask_ps(int32_t mask)
326 {
327 return castsi_ps(__conv(_mm512_maskz_set1_epi32(__mmask16(mask & 0xff), -1)));
328 }
329
330 //=======================================================================
331 // Legacy interface (available only in SIMD256 width)
332 //=======================================================================
333
// Tear down the helper macros so they do not leak into the SIMD width
// implementations included after this file.
#undef SIMD_WRAPPER_1_
#undef SIMD_WRAPPER_1
#undef SIMD_WRAPPER_1I_
#undef SIMD_WRAPPER_1I
#undef SIMD_WRAPPER_2_
#undef SIMD_WRAPPER_2
#undef SIMD_WRAPPER_2I
#undef SIMD_WRAPPER_3_
#undef SIMD_WRAPPER_3
#undef SIMD_IWRAPPER_1_
#undef SIMD_IWRAPPER_1_32
#undef SIMD_IWRAPPER_1I_
#undef SIMD_IWRAPPER_1I_32
#undef SIMD_IWRAPPER_2_
#undef SIMD_IWRAPPER_2_32
#undef SIMD_IWRAPPER_2I