From: Tim Rowley <timothy.o.rowley@intel.com>
Date: Tue, 11 Oct 2016 17:57:29 +0000 (-0500)
Subject: swr: [rasterizer core/sim] 8x2 backend + 16-wide tile clear/load/store
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=488992221056edaf7111f9290afdf216c5e98d62;p=mesa.git

swr: [rasterizer core/sim] 8x2 backend + 16-wide tile clear/load/store

Work in progress (disabled).  The USE_8x2_TILE_BACKEND define in knobs.h
enables the AVX512 code paths (emulated on non-AVX512 hardware).

Signed-off-by: Tim Rowley <timothy.o.rowley@intel.com>
---

diff --git a/src/gallium/drivers/swr/rasterizer/common/os.h b/src/gallium/drivers/swr/rasterizer/common/os.h
index 0671c5a56ee..ac52b605cc2 100644
--- a/src/gallium/drivers/swr/rasterizer/common/os.h
+++ b/src/gallium/drivers/swr/rasterizer/common/os.h
@@ -246,6 +246,9 @@ typedef MEGABYTE GIGABYTE[1024];
 #define OSALIGNLINE(RWORD) OSALIGN(RWORD, 64)
 #define OSALIGNSIMD(RWORD) OSALIGN(RWORD, KNOB_SIMD_BYTES)
+#if ENABLE_AVX512_SIMD16
+#define OSALIGNSIMD16(RWORD) OSALIGN(RWORD, KNOB_SIMD16_BYTES)
+#endif

 #include "common/swr_assert.h"
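OSALIGNSIMD16 follows the pattern of the existing OSALIGNSIMD macro, but aligns to the 16-wide register size (KNOB_SIMD16_BYTES, presumably 64 bytes for sixteen float lanes). A minimal usage sketch; the variable here is hypothetical and not part of the patch:

    // Hypothetical declaration: a 16-lane float temporary whose alignment
    // satisfies _simd16_load_ps / _simd16_store_ps.
    OSALIGNSIMD16(float) vTemp[KNOB_SIMD16_WIDTH];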
diff --git a/src/gallium/drivers/swr/rasterizer/common/simd16intrin.h b/src/gallium/drivers/swr/rasterizer/common/simd16intrin.h
index 8a1714ad1ea..56ecf5bfd3d 100644
--- a/src/gallium/drivers/swr/rasterizer/common/simd16intrin.h
+++ b/src/gallium/drivers/swr/rasterizer/common/simd16intrin.h
@@ -151,12 +151,12 @@ INLINE simd16scalari _simd16_set1_epi32(int a)
     return result;
 }

-INLINE simd16scalari _simd16_set_epi32(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0)
+INLINE simd16scalar _simd16_set_ps(float e15, float e14, float e13, float e12, float e11, float e10, float e9, float e8, float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0)
 {
-    simd16scalari result;
+    simd16scalar result;

-    result.lo = _mm256_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0);
-    result.hi = _mm256_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0);
+    result.lo = _mm256_set_ps(e7, e6, e5, e4, e3, e2, e1, e0);
+    result.hi = _mm256_set_ps(e15, e14, e13, e12, e11, e10, e9, e8);

     return result;
 }
@@ -171,6 +171,26 @@ INLINE simd16scalari _simd16_set_epi32(int e15, int e14, int e13, int e12, int e
     return result;
 }

+INLINE simd16scalar _simd16_set_ps(float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0)
+{
+    simd16scalar result;
+
+    result.lo = _mm256_set_ps(e7, e6, e5, e4, e3, e2, e1, e0);
+    result.hi = _mm256_set_ps(e7, e6, e5, e4, e3, e2, e1, e0);
+
+    return result;
+}
+
+INLINE simd16scalari _simd16_set_epi32(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0)
+{
+    simd16scalari result;
+
+    result.lo = _mm256_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0);
+    result.hi = _mm256_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0);
+
+    return result;
+}
+
 INLINE simd16scalar _simd16_load_ps(float const *m)
 {
     simd16scalar result;
@@ -267,6 +287,58 @@ INLINE void _simd16_store_si(simd16scalari *m, simd16scalari a)
     _mm256_store_si256(&m[0].hi, a.hi);
 }

+INLINE simdscalar _simd16_extract_ps(simd16scalar a, int imm8)
+{
+    switch (imm8)
+    {
+    case 0:
+        return a.lo;
+    case 1:
+        return a.hi;
+    }
+    return _simd_set1_ps(0.0f);
+}
+
+INLINE simdscalari _simd16_extract_si(simd16scalari a, int imm8)
+{
+    switch (imm8)
+    {
+    case 0:
+        return a.lo;
+    case 1:
+        return a.hi;
+    }
+    return _simd_set1_epi32(0);
+}
+
+INLINE simd16scalar _simd16_insert_ps(simd16scalar a, simdscalar b, int imm8)
+{
+    switch (imm8)
+    {
+    case 0:
+        a.lo = b;
+        break;
+    case 1:
+        a.hi = b;
+        break;
+    }
+    return a;
+}
+
+INLINE simd16scalari _simd16_insert_si(simd16scalari a, simdscalari b, int imm8)
+{
+    switch (imm8)
+    {
+    case 0:
+        a.lo = b;
+        break;
+    case 1:
+        a.hi = b;
+        break;
+    }
+    return a;
+}
+
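On hardware without AVX512, a simd16 register is emulated as a lo/hi pair of 8-wide registers, and the _simd16_extract_*/_simd16_insert_* helpers added above are the bridge between the two widths. A sketch of the intended pattern under that reading (SquareLanes is a made-up example, not a function from this patch):

    // Process a 16-wide register as two 8-wide halves and reassemble it.
    INLINE simd16scalar SquareLanes(simd16scalar v)
    {
        // imm8 selects the half: 0 = lanes 0..7, 1 = lanes 8..15
        simdscalar lo = _simd16_extract_ps(v, 0);
        simdscalar hi = _simd16_extract_ps(v, 1);

        v = _simd16_insert_ps(v, _simd_mul_ps(lo, lo), 0);
        v = _simd16_insert_ps(v, _simd_mul_ps(hi, hi), 1);

        return v;
    }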
 template <int imm8>
 INLINE simd16scalar _simd16_blend_ps_temp(simd16scalar a, simd16scalar b)
 {
@@ -446,10 +518,10 @@ SIMD16_EMU_AVX512_2(simd16scalari, _simd16_max_epi32, _mm256_max_epi32)
 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_min_epu32, _mm256_min_epu32)
 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_max_epu32, _mm256_max_epu32)
 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_add_epi32, _mm256_add_epi32)
-SIMD16_EMU_AVX512_2(simd16scalari, _simd16_and_si, _mm256_and_si256)
-SIMD16_EMU_AVX512_2(simd16scalari, _simd16_andnot_si, _mm256_andnot_si256)
-SIMD16_EMU_AVX512_2(simd16scalari, _simd16_or_si, _mm256_or_si256)
-SIMD16_EMU_AVX512_2(simd16scalari, _simd16_xor_si, _mm256_xor_si256)
+SIMD16_EMU_AVX512_2(simd16scalari, _simd16_and_si, _simd_and_si)
+SIMD16_EMU_AVX512_2(simd16scalari, _simd16_andnot_si, _simd_andnot_si)
+SIMD16_EMU_AVX512_2(simd16scalari, _simd16_or_si, _simd_or_si)
+SIMD16_EMU_AVX512_2(simd16scalari, _simd16_xor_si, _simd_xor_si)
 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_cmpeq_epi32, _mm256_cmpeq_epi32)
 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_cmpgt_epi32, _mm256_cmpgt_epi32)
@@ -463,16 +535,18 @@ INLINE int _simd16_testz_ps(simd16scalar a, simd16scalar b)

 #define _simd16_cmplt_epi32(a, b) _simd16_cmpgt_epi32(b, a)

-SIMD16_EMU_AVX512_2(simd16scalari, _simd16_unpacklo_epi32, _mm256_unpacklo_epi32)
-SIMD16_EMU_AVX512_2(simd16scalari, _simd16_unpackhi_epi32, _mm256_unpackhi_epi32)
+SIMD16_EMU_AVX512_2(simd16scalari, _simd16_unpacklo_epi32, _simd_unpacklo_epi32)
+SIMD16_EMU_AVX512_2(simd16scalari, _simd16_unpackhi_epi32, _simd_unpackhi_epi32)
+SIMD16_EMU_AVX512_2(simd16scalari, _simd16_unpacklo_epi64, _simd_unpacklo_epi64)
+SIMD16_EMU_AVX512_2(simd16scalari, _simd16_unpackhi_epi64, _simd_unpackhi_epi64)

 template <int imm8>
 INLINE simd16scalari _simd16_slli_epi32_temp(simd16scalari a)
 {
     simd16scalari result;

-    result.lo = _mm256_slli_epi32(a.lo, imm8);
-    result.hi = _mm256_slli_epi32(a.hi, imm8);
+    result.lo = _simd_slli_epi32(a.lo, imm8);
+    result.hi = _simd_slli_epi32(a.hi, imm8);

     return result;
 }
@@ -484,8 +558,8 @@ INLINE simd16scalari _simd16_srai_epi32_temp(simd16scalari a)
 {
     simd16scalari result;

-    result.lo = _mm256_srai_epi32(a.lo, imm8);
-    result.hi = _mm256_srai_epi32(a.hi, imm8);
+    result.lo = _simd_srai_epi32(a.lo, imm8);
+    result.hi = _simd_srai_epi32(a.hi, imm8);

     return result;
 }
@@ -497,8 +571,8 @@ INLINE simd16scalari _simd16_srli_epi32_temp(simd16scalari a)
 {
     simd16scalari result;

-    result.lo = _mm256_srli_epi32(a.lo, imm8);
-    result.hi = _mm256_srli_epi32(a.hi, imm8);
+    result.lo = _simd_srli_epi32(a.lo, imm8);
+    result.hi = _simd_srli_epi32(a.hi, imm8);

     return result;
 }
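The SIMD16_EMU_AVX512_2 macro used throughout these hunks is defined elsewhere in simd16intrin.h and is not shown here; judging from its invocations, it generates a two-operand wrapper that applies the named 8-wide intrinsic to the lo and hi halves independently. A sketch of the presumed expansion (the exact macro body may differ):

    // SIMD16_EMU_AVX512_2(simd16scalari, _simd16_add_epi32, _mm256_add_epi32)
    // presumably generates:
    INLINE simd16scalari _simd16_add_epi32(simd16scalari a, simd16scalari b)
    {
        simd16scalari result;

        result.lo = _mm256_add_epi32(a.lo, b.lo);   // lanes 0..7
        result.hi = _mm256_add_epi32(a.hi, b.hi);   // lanes 8..15

        return result;
    }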
@@ -534,28 +608,78 @@ SIMD16_EMU_AVX512_2(simd16scalari, _simd16_cmpgt_epi16, _mm256_cmpgt_epi16)
 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_cmpeq_epi8, _mm256_cmpeq_epi8)
 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_cmpgt_epi8, _mm256_cmpgt_epi8)

-INLINE simd16scalar _simd16_permute_ps(simd16scalar a, simd16scalari b)
+INLINE simd16scalar _simd16_permute_ps(simd16scalar a, simd16scalari i)
 {
     simd16scalar result;

-    result.lo = _mm256_permutevar8x32_ps(a.lo, b.lo);
-    result.hi = _mm256_permutevar8x32_ps(a.hi, b.hi);
+    const simdscalari mask = _simd_set1_epi32(7);
+
+    simdscalar lolo = _simd_permute_ps(a.lo, _simd_and_si(i.lo, mask));
+    simdscalar lohi = _simd_permute_ps(a.hi, _simd_and_si(i.lo, mask));
+
+    simdscalar hilo = _simd_permute_ps(a.lo, _simd_and_si(i.hi, mask));
+    simdscalar hihi = _simd_permute_ps(a.hi, _simd_and_si(i.hi, mask));
+
+    result.lo = _simd_blendv_ps(lolo, lohi, _simd_castsi_ps(_simd_cmpgt_epi32(i.lo, mask)));
+    result.hi = _simd_blendv_ps(hilo, hihi, _simd_castsi_ps(_simd_cmpgt_epi32(i.hi, mask)));

     return result;
 }

-SIMD16_EMU_AVX512_2(simd16scalari, _simd16_permute_epi32, _mm256_permutevar8x32_epi32)
+INLINE simd16scalari _simd16_permute_epi32(simd16scalari a, simd16scalari i)
+{
+    return _simd16_castps_si(_simd16_permute_ps(_simd16_castsi_ps(a), i));
+}

 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_srlv_epi32, _mm256_srlv_epi32)
 SIMD16_EMU_AVX512_2(simd16scalari, _simd16_sllv_epi32, _mm256_sllv_epi32)

+template <int imm8>
+INLINE simd16scalar _simd16_permute2f128_ps_temp(simd16scalar a, simd16scalar b)
+{
+    simd16scalar result;
+
+    result.lo = _simd_permute2f128_ps(a.lo, a.hi, ((imm8 & 0x03) << 0) | ((imm8 & 0x0C) << 2));
+    result.hi = _simd_permute2f128_ps(b.lo, b.hi, ((imm8 & 0x30) >> 4) | ((imm8 & 0xC0) >> 2));
+
+    return result;
+}
+
+#define _simd16_permute2f128_ps(a, b, imm8) _simd16_permute2f128_ps_temp<imm8>(a, b)
+
+template <int imm8>
+INLINE simd16scalard _simd16_permute2f128_pd_temp(simd16scalard a, simd16scalard b)
+{
+    simd16scalard result;
+
+    result.lo = _simd_permute2f128_pd(a.lo, a.hi, ((imm8 & 0x03) << 0) | ((imm8 & 0x0C) << 2));
+    result.hi = _simd_permute2f128_pd(b.lo, b.hi, ((imm8 & 0x30) >> 4) | ((imm8 & 0xC0) >> 2));
+
+    return result;
+}
+
+#define _simd16_permute2f128_pd(a, b, imm8) _simd16_permute2f128_pd_temp<imm8>(a, b)
+
+template <int imm8>
+INLINE simd16scalari _simd16_permute2f128_si_temp(simd16scalari a, simd16scalari b)
+{
+    simd16scalari result;
+
+    result.lo = _simd_permute2f128_si(a.lo, a.hi, ((imm8 & 0x03) << 0) | ((imm8 & 0x0C) << 2));
+    result.hi = _simd_permute2f128_si(b.lo, b.hi, ((imm8 & 0x30) >> 4) | ((imm8 & 0xC0) >> 2));
+
+    return result;
+}
+
+#define _simd16_permute2f128_si(a, b, imm8) _simd16_permute2f128_si_temp<imm8>(a, b)
+
 template <int imm8>
 INLINE simd16scalar _simd16_shuffle_ps_temp(simd16scalar a, simd16scalar b)
 {
     simd16scalar result;

-    result.lo = _mm256_shuffle_ps(a.lo, b.lo, imm8);
-    result.hi = _mm256_shuffle_ps(a.hi, b.hi, imm8);
+    result.lo = _simd_shuffle_ps(a.lo, b.lo, imm8);
+    result.hi = _simd_shuffle_ps(a.hi, b.hi, imm8);

     return result;
 }
@@ -563,17 +687,48 @@ INLINE simd16scalar _simd16_shuffle_ps_temp(simd16scalar a, simd16scalar b)

 #define _simd16_shuffle_ps(a, b, imm8) _simd16_shuffle_ps_temp<imm8>(a, b)

 template <int imm8>
-INLINE simd16scalari _simd16_permute_128_temp(simd16scalari a, simd16scalari b)
+INLINE simd16scalard _simd16_shuffle_pd_temp(simd16scalard a, simd16scalard b)
 {
-    simd16scalari result;
+    simd16scalard result;

-    result.lo = _mm256_permute2x128_si256(a.lo, b.lo, imm8);
-    result.hi = _mm256_permute2x128_si256(a.hi, b.hi, imm8);
+    result.lo = _simd_shuffle_pd(a.lo, b.lo, (imm8 & 15));
+    result.hi = _simd_shuffle_pd(a.hi, b.hi, (imm8 >> 4));

     return result;
 }

-#define _simd16_permute_128(a, b, imm8) _simd16_permute_128_temp<imm8>(a, b)
+#define _simd16_shuffle_pd(a, b, imm8) _simd16_shuffle_pd_temp<imm8>(a, b)
+
+template <int imm8>
+INLINE simd16scalari _simd16_shuffle_epi32_temp(simd16scalari a, simd16scalari b)
+{
+    return _simd16_castps_si(_simd16_shuffle_ps(_simd16_castsi_ps(a), _simd16_castsi_ps(b), imm8));
+}
+
+#define _simd16_shuffle_epi32(a, b, imm8) _simd16_shuffle_epi32_temp<imm8>(a, b)
+
+template <int imm8>
+INLINE simd16scalari _simd16_shuffle_epi64_temp(simd16scalari a, simd16scalari b)
+{
+    return _simd16_castpd_si(_simd16_shuffle_pd(_simd16_castsi_pd(a), _simd16_castsi_pd(b), imm8));
+}
+
+#define _simd16_shuffle_epi64(a, b, imm8) _simd16_shuffle_epi64_temp<imm8>(a, b)
+
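The bit twiddling in the _simd16_permute2f128_*_temp emulations remaps the four 2-bit 128-bit-chunk selectors of the AVX512 shuffle immediate onto the two 4-bit selectors the 8-wide permute2f128 intrinsics expect; selector values 0-3 address the four 128-bit chunks of a (low result half) or b (high result half). A worked example of the remapping:

    // _simd16_permute2f128_ps(a, b, 0x44) should behave like
    // _mm512_shuffle_f32x4(a, b, 0x44): select chunks a[0], a[1], b[0], b[1].
    //
    //   lo ctrl = ((0x44 & 0x03) << 0) | ((0x44 & 0x0C) << 2) = 0x10
    //          -> chunks a[0], a[1]  (identity on the low half)
    //   hi ctrl = ((0x44 & 0x30) >> 4) | ((0x44 & 0xC0) >> 2) = 0x10
    //          -> chunks b[0], b[1]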
+INLINE simd16mask _simd16_int2mask(int mask)
+{
+    return mask;
+}
+
+INLINE int _simd16_mask2int(simd16mask mask)
+{
+    return mask;
+}
+
+INLINE simd16mask _simd16_cmplt_ps_mask(simd16scalar a, simd16scalar b)
+{
+    return _simd16_movemask_ps(_simd16_cmplt_ps(a, b));
+}

 // convert bitmask to vector mask
 INLINE simd16scalar vMask16(int32_t mask)
 {
@@ -591,21 +746,13 @@ INLINE simd16mask _simd16_scalari2mask(simd16scalari mask)
 {
-    __m512i flag = _mm512_set1_epi32(0x80000000);
-
-    __m512i temp = _mm512_and_epi32(mask, flag);
-
-    return _mm512_cmpeq_epu32_mask(temp, flag);
+    return _mm512_cmpneq_epu32_mask(mask, _mm512_setzero_epi32());
 }

 #if 0
 INLINE simd16mask _simd16_scalard2mask(simd16scalard mask)
 {
-    __m512i flag = _mm512_set1_epi64(0x8000000000000000);
-
-    __m512 tempi = _mm512_and_epi64(_mm512_castpd_si512(mask), flag);
-
-    return _mm512_cmpeq_epu64_mask(temp, flag);
+    return _mm512_cmpneq_epu64_mask(mask, _mm512_setzero_epi64());
 }
 #endif
@@ -615,22 +762,24 @@ INLINE simd16mask _simd16_scalard2mask(simd16scalard mask)

 #define _simd16_set1_epi8 _mm512_set1_epi8
 #define _simd16_set1_epi32 _mm512_set1_epi32

-INLINE simd16scalari _simd16_set_epi32(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0)
+INLINE simd16scalar _simd16_set_ps(float e15, float e14, float e13, float e12, float e11, float e10, float e9, float e8, float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0)
 {
-    simd16scalari result;
-
-    result = _mm512_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0, e7, e6, e5, e4, e3, e2, e1, e0);
-
-    return result;
+    return _mm512_set_ps(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
 }

 INLINE simd16scalari _simd16_set_epi32(int e15, int e14, int e13, int e12, int e11, int e10, int e9, int e8, int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0)
 {
-    simd16scalari result;
+    return _mm512_set_epi32(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
+}

-    result = _mm512_set_epi32(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
+INLINE simd16scalar _simd16_set_ps(float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0)
+{
+    return _mm512_set_ps(e7, e6, e5, e4, e3, e2, e1, e0, e7, e6, e5, e4, e3, e2, e1, e0);
+}

-    return result;
+INLINE simd16scalari _simd16_set_epi32(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0)
+{
+    return _mm512_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0, e7, e6, e5, e4, e3, e2, e1, e0);
 }

 #define _simd16_load_ps _mm512_load_ps
@@ -638,12 +787,16 @@ INLINE simd16scalari _simd16_set_epi32(int e15, int e14, int e13, int e12, int e
 #if 1
 #define _simd16_load1_ps _simd16_broadcast_ss
 #endif
-#define _simd16_load_si _mm256_load_si256
-#define _simd16_loadu_si _mm256_loadu_si256
+#define _simd16_load_si _mm512_load_si512
+#define _simd16_loadu_si _mm512_loadu_si512
 #define _simd16_broadcast_ss(m) _mm512_extload_ps(m, _MM_UPCONV_PS_NONE, _MM_BROADCAST_1X16, 0)
 #define _simd16_broadcast_ps(m) _mm512_extload_ps(m, _MM_UPCONV_PS_NONE, _MM_BROADCAST_4X16, 0)
 #define _simd16_store_ps _mm512_store_ps
 #define _simd16_store_si _mm512_store_si512
+#define _simd16_extract_ps _mm512_extractf32x8_ps
+#define _simd16_extract_si _mm512_extracti32x8_epi32
+#define _simd16_insert_ps _mm512_insertf32x8
+#define _simd16_insert_si _mm512_inserti32x8

 INLINE void _simd16_maskstore_ps(float *m, simd16scalari mask, simd16scalar a)
 {
@@ -678,7 +831,7 @@ INLINE simd16scalari _simd16_blendv_epi32(simd16scalari a, simd16scalari b, const
 #define _simd16_mul_ps _mm512_mul_ps
 #define _simd16_add_ps _mm512_add_ps
 #define _simd16_sub_ps _mm512_sub_ps
-#define _simd16_rsqrt_ps _mm512_rsqrt23_ps
+#define _simd16_rsqrt_ps _mm512_rsqrt14_ps
 #define _simd16_min_ps _mm512_min_ps
 #define _simd16_max_ps _mm512_max_ps
@@ -710,7 +863,7 @@ INLINE simd16scalar _simd16_cmp_ps_temp(simd16scalar a, simd16scalar b)
 {
     simd16mask k = _mm512_cmpeq_ps_mask(a, b);

-    return _mm512_castsi256_ps(_mm512_mask_blend_epi32(k, _mm512_setzero_epi32(), _mm512_set1_epi32(0xFFFFFFFF)));
+    return _mm512_castsi512_ps(_mm512_mask_blend_epi32(k, _mm512_setzero_epi32(), _mm512_set1_epi32(0xFFFFFFFF)));
 }

 #define _simd16_cmp_ps(a, b, comp) _simd16_cmp_ps_temp<comp>(a, b)
@@ -787,6 +940,8 @@ INLINE int _simd16_testz_ps(simd16scalar a, simd16scalar b)

 #define _simd16_unpacklo_epi32 _mm512_unpacklo_epi32
 #define _simd16_unpackhi_epi32 _mm512_unpackhi_epi32
+#define _simd16_unpacklo_epi64 _mm512_unpacklo_epi64
+#define _simd16_unpackhi_epi64 _mm512_unpackhi_epi64
 #define _simd16_slli_epi32 _mm512_slli_epi32
 #define _simd16_srli_epi32 _mm512_srli_epi32
 #define _simd16_srai_epi32 _mm512_srai_epi32
@@ -844,33 +999,46 @@ INLINE simd16scalari _simd16_cmpgt_epi8(simd16scalari a, simd16scalari b)
     return _mm512_mask_blend_epi8(k, _mm512_setzero_si512(), _mm512_set1_epi32(0xFFFFFFFF));
 }

-#if 0
-INLINE simd16scalar _simd16_permute_ps(simd16scalar a, simd16scalari b)
-{
-    simd16scalar result;
+#define _simd16_permute_ps(a, i) _mm512_permutexvar_ps(i, a)
+#define _simd16_permute_epi32(a, i) _mm512_permutexvar_epi32(i, a)
+#define _simd16_sllv_epi32 _mm512_sllv_epi32
+#define _simd16_srlv_epi32 _mm512_srlv_epi32
+#define _simd16_permute2f128_ps _mm512_shuffle_f32x4
+#define _simd16_permute2f128_pd _mm512_shuffle_f64x2
+#define _simd16_permute2f128_si _mm512_shuffle_i32x4
+#define _simd16_shuffle_ps _mm512_shuffle_ps
+#define _simd16_shuffle_pd _mm512_shuffle_pd

-    result.lo = _mm256_permutevar8x32_ps(a.lo, b.lo);
-    result.hi = _mm256_permutevar8x32_ps(a.hi, b.hi);
-
-    return result;
+template <int imm8>
+INLINE simd16scalari _simd16_shuffle_epi32_temp(simd16scalari a, simd16scalari b)
+{
+    return _simd16_castps_si(_simd16_shuffle_ps(_simd16_castsi_ps(a), _simd16_castsi_ps(b), imm8));
 }

-INLINE (simd16scalari _simd16_permute_epi32(simd16scalari a, simd16scalari b)
+#define _simd16_shuffle_epi32(a, b, imm8) _simd16_shuffle_epi32_temp<imm8>(a, b)
+
+template <int imm8>
+INLINE simd16scalari _simd16_shuffle_epi64_temp(simd16scalari a, simd16scalari b)
 {
-    simd16scalar result;
+    return _simd16_castpd_si(_simd16_shuffle_pd(_simd16_castsi_pd(a), _simd16_castsi_pd(b), imm8));
+}

-    result.lo = _mm256_permutevar8x32_epi32(a.lo, b.lo);
-    result.hi = _mm256_permutevar8x32_epi32(a.hi, b.hi);
+#define _simd16_shuffle_epi64(a, b, imm8) _simd16_shuffle_epi64_temp<imm8>(a, b)

-    return result;
+INLINE simd16mask _simd16_int2mask(int mask)
+{
+    return _mm512_int2mask(mask);
 }
-#endif

+INLINE int _simd16_mask2int(simd16mask mask)
+{
+    return _mm512_mask2int(mask);
+}

-#define _simd16_sllv_epi32 _mm512_srlv_epi32
-#define _simd16_srlv_epi32 _mm512_sllv_epi32
-#define _simd16_shuffle_ps _mm512_shuffle_ps
-#define _simd16_permute_128 _mm512_permute4f128_epi32
+INLINE simd16mask _simd16_cmplt_ps_mask(simd16scalar a, simd16scalar b)
+{
+    return _mm512_cmplt_ps_mask(a, b);
+}

 // convert bitmask to vector mask
 INLINE simd16scalar vMask16(int32_t mask)
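One detail worth noting in the native path: _mm512_permutexvar_ps takes the index vector as its first argument, so the #define above swaps the (data, index) order used by the emulated _simd16_permute_ps. A small sketch that behaves identically on both paths (ReverseLanes is illustrative, not from the patch):

    // Reverse the lane order of a 16-wide register.
    INLINE simd16scalar ReverseLanes(simd16scalar v)
    {
        const simd16scalari idx = _simd16_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
                                                    8, 9, 10, 11, 12, 13, 14, 15);

        return _simd16_permute_ps(v, idx);  // lane i receives v[15 - i]
    }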
diff --git a/src/gallium/drivers/swr/rasterizer/common/simdintrin.h b/src/gallium/drivers/swr/rasterizer/common/simdintrin.h
index 3ad37de3d49..7671031cee8 100644
--- a/src/gallium/drivers/swr/rasterizer/common/simdintrin.h
+++ b/src/gallium/drivers/swr/rasterizer/common/simdintrin.h
@@ -296,6 +296,7 @@ __m256i _simdemu_sllv_epi32(__m256i vA, __m256i vCount)
 #define _simd_cmplt_epi32 _simdemu_cmplt_epi32
 #define _simd_cmpgt_epi32 _simdemu_cmpgt_epi32
 #define _simd_or_si _simdemu_or_si
+#define _simd_xor_si _simdemu_xor_si
 #define _simd_castps_si _mm256_castps_si256
 #define _simd_adds_epu8 _simdemu_adds_epu8
 #define _simd_subs_epu8 _simdemu_subs_epu8
@@ -327,6 +328,7 @@ SIMD_EMU_EPI(_simdemu_cmpeq_epi32, _mm_cmpeq_epi32)
 SIMD_EMU_EPI(_simdemu_cmplt_epi32, _mm_cmplt_epi32)
 SIMD_EMU_EPI(_simdemu_cmpgt_epi32, _mm_cmpgt_epi32)
 SIMD_EMU_EPI(_simdemu_or_si, _mm_or_si128)
+SIMD_EMU_EPI(_simdemu_xor_si, _mm_xor_si128)
 SIMD_EMU_EPI(_simdemu_adds_epu8, _mm_adds_epu8)
 SIMD_EMU_EPI(_simdemu_subs_epu8, _mm_subs_epu8)
 SIMD_EMU_EPI(_simdemu_add_epi8, _mm_add_epi8)
@@ -339,6 +341,8 @@ SIMD_EMU_EPI(_simdemu_cmpeq_epi16, _mm_cmpeq_epi16)

 #define _simd_unpacklo_epi32(a, b) _mm256_castps_si256(_mm256_unpacklo_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)))
 #define _simd_unpackhi_epi32(a, b) _mm256_castps_si256(_mm256_unpackhi_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)))
+#define _simd_unpacklo_epi64(a, b) _mm256_castpd_si256(_mm256_unpacklo_pd(_mm256_castsi256_pd(a), _mm256_castsi256_pd(b)))
+#define _simd_unpackhi_epi64(a, b) _mm256_castpd_si256(_mm256_unpackhi_pd(_mm256_castsi256_pd(a), _mm256_castsi256_pd(b)))

 #define _simd_slli_epi32(a,i) _simdemu_slli_epi32(a,i)
 #define _simd_srai_epi32(a,i) _simdemu_srai_epi32(a,i)
@@ -433,6 +437,63 @@ int _simdemu_movemask_epi8(__m256i a)
     return (resHi << 16) | resLo;
 }

+INLINE
+__m256i _simd_cvtepu8_epi32(__m128i a)
+{
+    __m128i resultlo = _mm_cvtepu8_epi32(a);
+    __m128i resulthi = _mm_shuffle_epi8(a, _mm_set_epi32(0x80808007, 0x80808006, 0x80808005, 0x80808004));
+
+    __m256i result = _mm256_castsi128_si256(resultlo);
+
+    return _mm256_insertf128_si256(result, resulthi, 1);
+}
+
+INLINE
+__m256i _simd_cvtepu16_epi32(__m128i a)
+{
+    __m128i resultlo = _mm_cvtepu16_epi32(a);
+    __m128i resulthi = _mm_shuffle_epi8(a, _mm_set_epi32(0x80800F0E, 0x80800D0C, 0x80800B0A, 0x80800908));
+
+    __m256i result = _mm256_castsi128_si256(resultlo);
+
+    return _mm256_insertf128_si256(result, resulthi, 1);
+}
+
+INLINE
+__m256i _simd_packus_epi32(__m256i a, __m256i b)
+{
+    __m128i alo = _mm256_extractf128_si256(a, 0);
+    __m128i ahi = _mm256_extractf128_si256(a, 1);
+
+    __m128i blo = _mm256_extractf128_si256(b, 0);
+    __m128i bhi = _mm256_extractf128_si256(b, 1);
+
+    __m128i resultlo = _mm_packus_epi32(alo, blo);
+    __m128i resulthi = _mm_packus_epi32(ahi, bhi);
+
+    __m256i result = _mm256_castsi128_si256(resultlo);
+
+    return _mm256_insertf128_si256(result, resulthi, 1);
+}
+
+INLINE
+__m256i _simd_packs_epi32(__m256i a, __m256i b)
+{
+    __m128i alo = _mm256_extractf128_si256(a, 0);
+    __m128i ahi = _mm256_extractf128_si256(a, 1);
+
+    __m128i blo = _mm256_extractf128_si256(b, 0);
+    __m128i bhi = _mm256_extractf128_si256(b, 1);
+
+    __m128i resultlo = _mm_packs_epi32(alo, blo);
+    __m128i resulthi = _mm_packs_epi32(ahi, bhi);
+
+    __m256i result = _mm256_castsi128_si256(resultlo);
+
+    return _mm256_insertf128_si256(result, resulthi, 1);
+}
+
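In the pre-AVX2 emulation above, _mm_shuffle_epi8 synthesizes the upper half of the zero-extending conversion: each selector byte of the mask names one source byte, and a selector of 0x80 forces that destination byte to zero. Reading the _simd_cvtepu8_epi32 mask dword by dword (an annotation, not code from the patch):

    // mask = _mm_set_epi32(0x80808007, 0x80808006, 0x80808005, 0x80808004)
    //   dst dword 0 = { src byte 4, 0, 0, 0 } -> zero-extended src[4]
    //   dst dword 1 = { src byte 5, 0, 0, 0 } -> zero-extended src[5]
    //   dst dword 2 = { src byte 6, 0, 0, 0 } -> zero-extended src[6]
    //   dst dword 3 = { src byte 7, 0, 0, 0 } -> zero-extended src[7]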
 #else

 #define _simd_mul_epi32 _mm256_mul_epi32
@@ -450,10 +511,13 @@ int _simdemu_movemask_epi8(__m256i a)
 #define _simd_cmplt_epi32(a,b) _mm256_cmpgt_epi32(b,a)
 #define _simd_cmpgt_epi32(a,b) _mm256_cmpgt_epi32(a,b)
 #define _simd_or_si _mm256_or_si256
+#define _simd_xor_si _mm256_xor_si256
 #define _simd_castps_si _mm256_castps_si256

 #define _simd_unpacklo_epi32 _mm256_unpacklo_epi32
 #define _simd_unpackhi_epi32 _mm256_unpackhi_epi32
+#define _simd_unpacklo_epi64 _mm256_unpacklo_epi64
+#define _simd_unpackhi_epi64 _mm256_unpackhi_epi64

 #define _simd_srli_si(a,i) _simdemu_srli_si128<i>(a)
 #define _simd_slli_epi32 _mm256_slli_epi32
@@ -479,19 +543,23 @@ int _simdemu_movemask_epi8(__m256i a)
 #define _simd_cmpeq_epi16 _mm256_cmpeq_epi16
 #define _simd_movemask_epi8 _mm256_movemask_epi8
 #define _simd_permute_ps _mm256_permutevar8x32_ps
+#define _simd_permute_epi32 _mm256_permutevar8x32_epi32
 #define _simd_srlv_epi32 _mm256_srlv_epi32
 #define _simd_sllv_epi32 _mm256_sllv_epi32
+#define _simd_cvtepu8_epi32 _mm256_cvtepu8_epi32
+#define _simd_cvtepu16_epi32 _mm256_cvtepu16_epi32
+#define _simd_packus_epi32 _mm256_packus_epi32
+#define _simd_packs_epi32 _mm256_packs_epi32

-INLINE
-simdscalari _simd_permute_epi32(simdscalari a, simdscalari index)
-{
-    return _simd_castps_si(_mm256_permutevar8x32_ps(_mm256_castsi256_ps(a), index));
-}
 #endif

-#define _simd_permute_128 _mm256_permute2f128_si256
-#define _simd_shuffleps_epi32(vA, vB, imm) _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(vA), _mm256_castsi256_ps(vB), imm))
+#define _simd_permute2f128_ps _mm256_permute2f128_ps
+#define _simd_permute2f128_pd _mm256_permute2f128_pd
+#define _simd_permute2f128_si _mm256_permute2f128_si256
 #define _simd_shuffle_ps _mm256_shuffle_ps
+#define _simd_shuffle_pd _mm256_shuffle_pd
+#define _simd_shuffle_epi32(a, b, imm8) _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b), imm8))
+#define _simd_shuffle_epi64(a, b, imm8) _mm256_castpd_si256(_mm256_shuffle_pd(_mm256_castsi256_pd(a), _mm256_castsi256_pd(b), imm8))
 #define _simd_set1_epi32 _mm256_set1_epi32
 #define _simd_set_epi32 _mm256_set_epi32
 #define _simd_set1_epi8 _mm256_set1_epi8
@@ -506,7 +574,6 @@ simdscalari _simd_permute_epi32(simdscalari a, simdscalari index)
 #define _simd_testz_ps _mm256_testz_ps
 #define _simd_xor_ps _mm256_xor_ps

-
 INLINE
 simdscalari _simd_blendv_epi32(simdscalari a, simdscalari b, simdscalar mask)
 {
@@ -1060,16 +1127,6 @@ INLINE simdscalar _simd_shuffle_ps(simdscalar a, simdscalar b, const int imm8)
     return result;
 }

-INLINE simdscalari _simd_permute_128(simdscalari a, simdscalari b, const int imm8)
-{
-    simdscalari result;
-
-    result.lo = _mm256_permute2x128_si256(a.lo, b.lo, imm8);
-    result.hi = _mm256_permute2x128_si256(a.hi, b.hi, imm8);
-
-    return result;
-}
-
 // convert bitmask to vector mask
 INLINE simdscalar vMask(int32_t mask)
 {
diff --git a/src/gallium/drivers/swr/rasterizer/core/backend.cpp b/src/gallium/drivers/swr/rasterizer/core/backend.cpp
index 143bd258afd..f71c2b2d345 100644
--- a/src/gallium/drivers/swr/rasterizer/core/backend.cpp
+++ b/src/gallium/drivers/swr/rasterizer/core/backend.cpp
@@ -101,21 +101,59 @@ void ClearRasterTile(uint8_t *pTileBuffer, simdvector &value)
     auto lambda = [&](int32_t comp)
     {
         FormatTraits<format>::storeSOA(comp, pTileBuffer, value.v[comp]);
+
         pTileBuffer += (KNOB_SIMD_WIDTH * FormatTraits<format>::GetBPC(comp) / 8);
     };

     const uint32_t numIter = (KNOB_TILE_Y_DIM / SIMD_TILE_Y_DIM) * (KNOB_TILE_X_DIM / SIMD_TILE_X_DIM);
+
     for (uint32_t i = 0; i < numIter; ++i)
     {
         UnrollerL<0, FormatTraits<format>::numComps, 1>::step(lambda);
     }
 }

+#if USE_8x2_TILE_BACKEND
+template<SWR_FORMAT format>
+void ClearRasterTile(uint8_t *pTileBuffer, simd16vector &value)
+{
+    auto lambda = [&](int32_t comp)
+    {
+        FormatTraits<format>::storeSOA(comp, pTileBuffer, value.v[comp]);
+
+        pTileBuffer += (KNOB_SIMD16_WIDTH * FormatTraits<format>::GetBPC(comp) / 8);
+    };
+
+    const uint32_t numIter = (KNOB_TILE_Y_DIM / SIMD16_TILE_Y_DIM) * (KNOB_TILE_X_DIM / SIMD16_TILE_X_DIM);
+
+    for (uint32_t i = 0; i < numIter; ++i)
+    {
+        UnrollerL<0, FormatTraits<format>::numComps, 1>::step(lambda);
+    }
+}
+
+#endif
 template<SWR_FORMAT format>
 INLINE void ClearMacroTile(DRAW_CONTEXT *pDC, SWR_RENDERTARGET_ATTACHMENT rt, uint32_t macroTile, DWORD clear[4], const SWR_RECT& rect)
 {
     // convert clear color to hottile format
     // clear color is in RGBA float/uint32
+#if USE_8x2_TILE_BACKEND
+    simd16vector vClear;
+    for (uint32_t comp = 0; comp < FormatTraits<format>::numComps; ++comp)
+    {
+        simd16scalar vComp;
+        vComp = _simd16_load1_ps((const float*)&clear[comp]);
+        if (FormatTraits<format>::isNormalized(comp))
+        {
+            vComp = _simd16_mul_ps(vComp, _simd16_set1_ps(FormatTraits<format>::fromFloat(comp)));
+            vComp = _simd16_castsi_ps(_simd16_cvtps_epi32(vComp));
+        }
+        vComp = FormatTraits<format>::pack(comp, vComp);
+        vClear.v[FormatTraits<format>::swizzle(comp)] = vComp;
+    }
+
+#else
     simdvector vClear;
     for (uint32_t comp = 0; comp < FormatTraits<format>::numComps; ++comp)
     {
@@ -130,6 +168,7 @@ INLINE void ClearMacroTile(DRAW_CONTEXT *pDC, SWR_RENDERTARGET_ATTACHMENT rt, ui
         vClear.v[FormatTraits<format>::swizzle(comp)] = vComp;
     }

+#endif
     uint32_t tileX, tileY;
     MacroTileMgr::getTileIndices(macroTile, tileX, tileY);
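For the default 8x8-pixel raster tile, the 16-wide clear path above runs fewer, wider iterations. Illustrative arithmetic, assuming KNOB_TILE_X_DIM = KNOB_TILE_Y_DIM = 8 and the 4x2 / 8x2 simd tile shapes this series uses:

    // 8-wide backend,  4x2-pixel simd tiles:  numIter = (8 / 2) * (8 / 4) = 8
    //   -> 8 iterations, each storing KNOB_SIMD_WIDTH  = 8 pixels per component
    // 16-wide backend, 8x2-pixel simd tiles:  numIter = (8 / 2) * (8 / 8) = 4
    //   -> 4 iterations, each storing KNOB_SIMD16_WIDTH = 16 pixels per component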
@@ -471,6 +510,10 @@ void BackendSingleSample(DRAW_CONTEXT *pDC, uint32_t workerId, uint32_t x, uint3

         for(uint32_t xx = x; xx < x + KNOB_TILE_X_DIM; xx += SIMD_TILE_X_DIM)
         {
+#if USE_8x2_TILE_BACKEND
+            const bool useAlternateOffset = ((xx & SIMD_TILE_X_DIM) != 0);
+
+#endif
             if(coverageMask & MASK)
             {
                 psContext.vX.UL = _simd_add_ps(vULOffsetsX, _simd_set1_ps((float)xx));
@@ -578,7 +621,11 @@ void BackendSingleSample(DRAW_CONTEXT *pDC, uint32_t workerId, uint32_t x, uint3
                 // output merger
                 AR_BEGIN(BEOutputMerger, pDC->drawId);
+#if USE_8x2_TILE_BACKEND
+                OutputMerger(psContext, pColorBase, 0, pBlendState, state.pfnBlendFunc, vCoverageMask, depthPassMask, pPSState->numRenderTargets, useAlternateOffset);
+#else
                 OutputMerger(psContext, pColorBase, 0, pBlendState, state.pfnBlendFunc, vCoverageMask, depthPassMask, pPSState->numRenderTargets);
+#endif

                 // do final depth write after all pixel kills
                 if (!pPSState->forceEarlyZ)
@@ -599,10 +646,20 @@ Endtile:
         pDepthBase += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_DEPTH_HOT_TILE_FORMAT>::bpp) / 8;
         pStencilBase += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_STENCIL_HOT_TILE_FORMAT>::bpp) / 8;

-        for(uint32_t rt = 0; rt < NumRT; ++rt)
+#if USE_8x2_TILE_BACKEND
+        if (useAlternateOffset)
+        {
+            for (uint32_t rt = 0; rt < NumRT; ++rt)
+            {
+                pColorBase[rt] += (2 * KNOB_SIMD_WIDTH * FormatTraits<KNOB_COLOR_HOT_TILE_FORMAT>::bpp) / 8;
+            }
+        }
+#else
+        for (uint32_t rt = 0; rt < NumRT; ++rt)
         {
             pColorBase[rt] += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_COLOR_HOT_TILE_FORMAT>::bpp) / 8;
         }
+#endif
         AR_END(BEEndTile, 0);
     }
 }
@@ -675,6 +732,10 @@ void BackendSampleRate(DRAW_CONTEXT *pDC, uint32_t workerId, uint32_t x, uint32_

         for (uint32_t xx = x; xx < x + KNOB_TILE_X_DIM; xx += SIMD_TILE_X_DIM)
         {
+#if USE_8x2_TILE_BACKEND
+            const bool useAlternateOffset = ((xx & SIMD_TILE_X_DIM) != 0);
+
+#endif
             psContext.vX.UL = _simd_add_ps(vULOffsetsX, _simd_set1_ps((float)xx));
             // pixel center
             psContext.vX.center = _simd_add_ps(vCenterOffsetsX, _simd_set1_ps((float)xx));
@@ -814,7 +875,11 @@ void BackendSampleRate(DRAW_CONTEXT *pDC, uint32_t workerId, uint32_t x, uint32_
                 // output merger
                 AR_BEGIN(BEOutputMerger, pDC->drawId);
+#if USE_8x2_TILE_BACKEND
+                OutputMerger(psContext, pColorBase, sample, pBlendState, state.pfnBlendFunc, vCoverageMask, depthPassMask, pPSState->numRenderTargets, useAlternateOffset);
+#else
                 OutputMerger(psContext, pColorBase, sample, pBlendState, state.pfnBlendFunc, vCoverageMask, depthPassMask, pPSState->numRenderTargets);
+#endif

                 // do final depth write after all pixel kills
                 if (!pPSState->forceEarlyZ)
@@ -837,10 +902,20 @@ Endtile:
         pDepthBase += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_DEPTH_HOT_TILE_FORMAT>::bpp) / 8;
         pStencilBase += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_STENCIL_HOT_TILE_FORMAT>::bpp) / 8;

+#if USE_8x2_TILE_BACKEND
+        if (useAlternateOffset)
+        {
+            for (uint32_t rt = 0; rt < NumRT; ++rt)
+            {
+                pColorBase[rt] += (2 * KNOB_SIMD_WIDTH * FormatTraits<KNOB_COLOR_HOT_TILE_FORMAT>::bpp) / 8;
+            }
+        }
+#else
         for (uint32_t rt = 0; rt < NumRT; ++rt)
         {
             pColorBase[rt] += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_COLOR_HOT_TILE_FORMAT>::bpp) / 8;
         }
+#endif
         AR_END(BEEndTile, 0);
     }
 }
@@ -913,6 +988,10 @@ void BackendPixelRate(DRAW_CONTEXT *pDC, uint32_t workerId, uint32_t x, uint32_t
         psContext.vY.center = _simd_add_ps(vCenterOffsetsY, _simd_set1_ps((float)yy));
         for(uint32_t xx = x; xx < x + KNOB_TILE_X_DIM; xx += SIMD_TILE_X_DIM)
         {
+#if USE_8x2_TILE_BACKEND
+            const bool useAlternateOffset = ((xx & SIMD_TILE_X_DIM) != 0);
+
+#endif
             simdscalar activeLanes;
             if(!(work.anyCoveredSamples & MASK)) {goto Endtile;};
             activeLanes = vMask(work.anyCoveredSamples & MASK);
@@ -1030,7 +1109,11 @@ void BackendPixelRate(DRAW_CONTEXT *pDC, uint32_t workerId, uint32_t x, uint32_t
             }

             // broadcast the results of the PS to all passing pixels
+#if USE_8x2_TILE_BACKEND
+            OutputMerger(psContext, pColorBase, sample, pBlendState, state.pfnBlendFunc, coverageMask, depthMask, pPSState->numRenderTargets, useAlternateOffset);
+#else
             OutputMerger(psContext, pColorBase, sample, pBlendState, state.pfnBlendFunc, coverageMask, depthMask, pPSState->numRenderTargets);
+#endif

             if(!pPSState->forceEarlyZ && !T::bForcedSampleCount)
             {
@@ -1057,10 +1140,20 @@ Endtile:
         pDepthBase += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_DEPTH_HOT_TILE_FORMAT>::bpp) / 8;
         pStencilBase += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_STENCIL_HOT_TILE_FORMAT>::bpp) / 8;

+#if USE_8x2_TILE_BACKEND
+        if (useAlternateOffset)
+        {
+            for (uint32_t rt = 0; rt < NumRT; ++rt)
+            {
+                pColorBase[rt] += (2 * KNOB_SIMD_WIDTH * FormatTraits<KNOB_COLOR_HOT_TILE_FORMAT>::bpp) / 8;
+            }
+        }
+#else
         for(uint32_t rt = 0; rt < NumRT; ++rt)
         {
             pColorBase[rt] += (KNOB_SIMD_WIDTH * FormatTraits<KNOB_COLOR_HOT_TILE_FORMAT>::bpp) / 8;
         }
+#endif
         AR_END(BEEndTile, 0);
     }
 }
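The useAlternateOffset flag computed in all three backends above is the heart of the 8x2 scheme: a 16-wide tile row is produced by two consecutive 8-wide passes, so only the second pass of each pair advances the color pointers, and by a full 16-wide stride. A sketch of the access pattern, assuming SIMD_TILE_X_DIM = 4 and KNOB_TILE_X_DIM = 8:

    // pass 0: xx = x + 0  ->  useAlternateOffset = false
    //         OutputMerger writes lanes 0..7 of the 16-wide span (offset + 0)
    // pass 1: xx = x + 4  ->  useAlternateOffset = true
    //         OutputMerger writes lanes 8..15 (offset + sizeof(simdscalar)),
    //         then pColorBase[rt] advances by the full 16-wide stride:
    //         (2 * KNOB_SIMD_WIDTH * FormatTraits<KNOB_COLOR_HOT_TILE_FORMAT>::bpp) / 8 bytes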
diff --git a/src/gallium/drivers/swr/rasterizer/core/backend.h b/src/gallium/drivers/swr/rasterizer/core/backend.h
index fcc78f71afd..53222eabccd 100644
--- a/src/gallium/drivers/swr/rasterizer/core/backend.h
+++ b/src/gallium/drivers/swr/rasterizer/core/backend.h
@@ -643,6 +643,78 @@ INLINE void OutputMerger(SWR_PS_CONTEXT &psContext, uint8_t* (&pColorBase)[SWR_N
     }
 }

+#if USE_8x2_TILE_BACKEND
+INLINE void OutputMerger(SWR_PS_CONTEXT &psContext, uint8_t* (&pColorBase)[SWR_NUM_RENDERTARGETS], uint32_t sample, const SWR_BLEND_STATE *pBlendState,
+    const PFN_BLEND_JIT_FUNC (&pfnBlendFunc)[SWR_NUM_RENDERTARGETS], simdscalar &coverageMask, simdscalar depthPassMask, const uint32_t NumRT, bool useAlternateOffset)
+{
+    assert(sample == 0); // will need to update Raster Tile Color Offsets to support more than a single sample here.
+
+    // type safety guaranteed from template instantiation in BEChooser<>::GetFunc
+    uint32_t rasterTileColorOffset = RasterTileColorOffset(sample);
+
+    if (useAlternateOffset)
+    {
+        rasterTileColorOffset += sizeof(simdscalar);
+    }
+
+    simdvector blendSrc;
+    simdvector blendOut;
+
+    for (uint32_t rt = 0; rt < NumRT; ++rt)
+    {
+        simdscalar *pColorSample = reinterpret_cast<simdscalar *>(pColorBase[rt] + rasterTileColorOffset);
+
+        const SWR_RENDER_TARGET_BLEND_STATE *pRTBlend = &pBlendState->renderTarget[rt];
+
+        // pfnBlendFunc may not update all channels.  Initialize with PS output.
+        /// TODO: move this into the blend JIT.
+        blendOut = psContext.shaded[rt];
+
+        blendSrc[0] = pColorSample[0];
+        blendSrc[1] = pColorSample[2];
+        blendSrc[2] = pColorSample[4];
+        blendSrc[3] = pColorSample[6];
+
+        // Blend outputs and update coverage mask for alpha test
+        if (pfnBlendFunc[rt] != nullptr)
+        {
+            pfnBlendFunc[rt](
+                pBlendState,
+                psContext.shaded[rt],
+                psContext.shaded[1],
+                sample,
+                reinterpret_cast<uint8_t *>(&blendSrc),
+                blendOut,
+                &psContext.oMask,
+                reinterpret_cast<simdscalari *>(&coverageMask));
+        }
+
+        // final write mask
+        simdscalari outputMask = _simd_castps_si(_simd_and_ps(coverageMask, depthPassMask));
+
+        ///@todo can only use maskstore fast path if bpc is 32. Assuming hot tile is RGBA32_FLOAT.
+        static_assert(KNOB_COLOR_HOT_TILE_FORMAT == R32G32B32A32_FLOAT, "Unsupported hot tile format");
+
+        // store with color mask
+        if (!pRTBlend->writeDisableRed)
+        {
+            _simd_maskstore_ps(reinterpret_cast<float *>(&pColorSample[0]), outputMask, blendOut.x);
+        }
+        if (!pRTBlend->writeDisableGreen)
+        {
+            _simd_maskstore_ps(reinterpret_cast<float *>(&pColorSample[2]), outputMask, blendOut.y);
+        }
+        if (!pRTBlend->writeDisableBlue)
+        {
+            _simd_maskstore_ps(reinterpret_cast<float *>(&pColorSample[4]), outputMask, blendOut.z);
+        }
+        if (!pRTBlend->writeDisableAlpha)
+        {
+            _simd_maskstore_ps(reinterpret_cast<float *>(&pColorSample[6]), outputMask, blendOut.w);
+        }
+    }
+}
+
+#endif
 template
 struct SwrBackendTraits
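Within the 8x2 OutputMerger, the hot tile stores 16-wide component planes while the pixel shader output is still 8-wide; viewed as an array of 8-wide simdscalar registers, R, G, B and A therefore sit at indices 0, 2, 4 and 6, and the odd slots belong to the other 8-pixel half. The layout, assuming the RGBA32_FLOAT hot tile the static_assert demands:

    // one 8x2 region of a simd16 raster tile, viewed as simdscalar[8]:
    //   index 0: R lanes 0..7      index 1: R lanes 8..15
    //   index 2: G lanes 0..7      index 3: G lanes 8..15
    //   index 4: B lanes 0..7      index 5: B lanes 8..15
    //   index 6: A lanes 0..7      index 7: A lanes 8..15
    // useAlternateOffset adds sizeof(simdscalar) to land on the odd slots.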
diff --git a/src/gallium/drivers/swr/rasterizer/core/format_conversion.h b/src/gallium/drivers/swr/rasterizer/core/format_conversion.h
index 344758eefe5..63df2ab2910 100644
--- a/src/gallium/drivers/swr/rasterizer/core/format_conversion.h
+++ b/src/gallium/drivers/swr/rasterizer/core/format_conversion.h
@@ -194,3 +194,173 @@ INLINE void StoreSOA(const simdvector &src, uint8_t *pDst)

     UnrollerL<0, FormatTraits<DstFormat>::numComps, 1>::step(lambda);
 }
+
+#if ENABLE_AVX512_SIMD16
+//////////////////////////////////////////////////////////////////////////
+/// @brief Load SIMD packed pixels in SOA format and convert to
+///        SOA RGBA32_FLOAT format.
+/// @param pSrc - source data in SOA form
+/// @param dst - output data in SOA form
+template<SWR_FORMAT SrcFormat>
+INLINE void LoadSOA(const uint8_t *pSrc, simd16vector &dst)
+{
+    // fast path for float32
+    if ((FormatTraits<SrcFormat>::GetType(0) == SWR_TYPE_FLOAT) && (FormatTraits<SrcFormat>::GetBPC(0) == 32))
+    {
+        auto lambda = [&](int comp)
+        {
+            simd16scalar vComp = _simd16_load_ps(reinterpret_cast<const float *>(pSrc + comp * sizeof(simd16scalar)));
+
+            dst.v[FormatTraits<SrcFormat>::swizzle(comp)] = vComp;
+        };
+
+        UnrollerL<0, FormatTraits<SrcFormat>::numComps, 1>::step(lambda);
+        return;
+    }
+
+    auto lambda = [&](int comp)
+    {
+        // load SIMD components
+        simd16scalar vComp = FormatTraits<SrcFormat>::loadSOA_16(comp, pSrc);
+
+        // unpack
+        vComp = FormatTraits<SrcFormat>::unpack(comp, vComp);
+
+        // convert
+        if (FormatTraits<SrcFormat>::isNormalized(comp))
+        {
+            vComp = _simd16_cvtepi32_ps(_simd16_castps_si(vComp));
+            vComp = _simd16_mul_ps(vComp, _simd16_set1_ps(FormatTraits<SrcFormat>::toFloat(comp)));
+        }
+
+        dst.v[FormatTraits<SrcFormat>::swizzle(comp)] = vComp;
+
+        pSrc += (FormatTraits<SrcFormat>::GetBPC(comp) * KNOB_SIMD16_WIDTH) / 8;
+    };
+
+    UnrollerL<0, FormatTraits<SrcFormat>::numComps, 1>::step(lambda);
+}
+
+//////////////////////////////////////////////////////////////////////////
+/// @brief Clamps the given component based on the requirements of the
+///        Format template arg
+/// @param vComp - SIMD vector of floats
+/// @param Component - component
+template<SWR_FORMAT Format>
+INLINE simd16scalar Clamp(simd16scalar vComp, uint32_t Component)
+{
+    if (FormatTraits<Format>::isNormalized(Component))
+    {
+        if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_UNORM)
+        {
+            vComp = _simd16_max_ps(vComp, _simd16_setzero_ps());
+        }
+
+        if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_SNORM)
+        {
+            vComp = _simd16_max_ps(vComp, _simd16_set1_ps(-1.0f));
+        }
+        vComp = _simd16_min_ps(vComp, _simd16_set1_ps(1.0f));
+    }
+    else if (FormatTraits<Format>::GetBPC(Component) < 32)
+    {
+        if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_UINT)
+        {
+            int iMax = (1 << FormatTraits<Format>::GetBPC(Component)) - 1;
+            int iMin = 0;
+            simd16scalari vCompi = _simd16_castps_si(vComp);
+            vCompi = _simd16_max_epu32(vCompi, _simd16_set1_epi32(iMin));
+            vCompi = _simd16_min_epu32(vCompi, _simd16_set1_epi32(iMax));
+            vComp = _simd16_castsi_ps(vCompi);
+        }
+        else if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_SINT)
+        {
+            int iMax = (1 << (FormatTraits<Format>::GetBPC(Component) - 1)) - 1;
+            int iMin = -1 - iMax;
+            simd16scalari vCompi = _simd16_castps_si(vComp);
+            vCompi = _simd16_max_epi32(vCompi, _simd16_set1_epi32(iMin));
+            vCompi = _simd16_min_epi32(vCompi, _simd16_set1_epi32(iMax));
+            vComp = _simd16_castsi_ps(vCompi);
+        }
+    }
+
+    return vComp;
+}
+
+//////////////////////////////////////////////////////////////////////////
+/// @brief Normalize the given component based on the requirements of the
+///        Format template arg
+/// @param vComp - SIMD vector of floats
+/// @param Component - component
+template<SWR_FORMAT Format>
+INLINE simd16scalar Normalize(simd16scalar vComp, uint32_t Component)
+{
+    if (FormatTraits<Format>::isNormalized(Component))
+    {
+        vComp = _simd16_mul_ps(vComp, _simd16_set1_ps(FormatTraits<Format>::fromFloat(Component)));
+        vComp = _simd16_castsi_ps(_simd16_cvtps_epi32(vComp));
+    }
+    return vComp;
+}
+
+//////////////////////////////////////////////////////////////////////////
+/// @brief Convert and store simdvector of pixels in SOA
+///        RGBA32_FLOAT to SOA format
+/// @param src - source data in SOA form
+/// @param dst - output data in SOA form
+template<SWR_FORMAT DstFormat>
+INLINE void StoreSOA(const simd16vector &src, uint8_t *pDst)
+{
+    // fast path for float32
+    if ((FormatTraits<DstFormat>::GetType(0) == SWR_TYPE_FLOAT) && (FormatTraits<DstFormat>::GetBPC(0) == 32))
+    {
+        for (uint32_t comp = 0; comp < FormatTraits<DstFormat>::numComps; ++comp)
+        {
+            simd16scalar vComp = src.v[FormatTraits<DstFormat>::swizzle(comp)];
+
+            // Gamma-correct
+            if (FormatTraits<DstFormat>::isSRGB)
+            {
+                if (comp < 3)  // Input format is always RGBA32_FLOAT.
+                {
+                    vComp = FormatTraits<DstFormat>::convertSrgb(comp, vComp);
+                }
+            }
+
+            _simd16_store_ps(reinterpret_cast<float *>(pDst + comp * sizeof(simd16scalar)), vComp);
+        }
+        return;
+    }
+
+    auto lambda = [&](int comp)
+    {
+        simd16scalar vComp = src.v[FormatTraits<DstFormat>::swizzle(comp)];
+
+        // Gamma-correct
+        if (FormatTraits<DstFormat>::isSRGB)
+        {
+            if (comp < 3)  // Input format is always RGBA32_FLOAT.
+            {
+                vComp = FormatTraits<DstFormat>::convertSrgb(comp, vComp);
+            }
+        }
+
+        // clamp
+        vComp = Clamp<DstFormat>(vComp, comp);
+
+        // normalize
+        vComp = Normalize<DstFormat>(vComp, comp);
+
+        // pack
+        vComp = FormatTraits<DstFormat>::pack(comp, vComp);
+
+        // store
+        FormatTraits<DstFormat>::storeSOA(comp, pDst, vComp);
+
+        pDst += (FormatTraits<DstFormat>::GetBPC(comp) * KNOB_SIMD16_WIDTH) / 8;
+    };
+
+    UnrollerL<0, FormatTraits<DstFormat>::numComps, 1>::step(lambda);
+}
+
+#endif
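For a normalized component, the simd16 store path above runs clamp, normalize, then pack. Tracing one lane of an 8-bit UNORM component through it (illustrative values; fromFloat() is 255.0f for UNORM8):

    // input lane value : 1.7f  (out of range for UNORM)
    // Clamp            : min(max(1.7f, 0.0f), 1.0f)       -> 1.0f
    // Normalize        : (int)(1.0f * 255.0f)             -> 255
    // FormatTraits pack: 32-bit 255, unsigned-saturated   -> byte 0xFF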
diff --git a/src/gallium/drivers/swr/rasterizer/core/format_types.h b/src/gallium/drivers/swr/rasterizer/core/format_types.h
index 5deed2e8f20..fcb137d52df 100644
--- a/src/gallium/drivers/swr/rasterizer/core/format_types.h
+++ b/src/gallium/drivers/swr/rasterizer/core/format_types.h
@@ -40,6 +40,12 @@ struct PackTraits
     static void storeSOA(uint8_t *pDst, simdscalar src) = delete;
     static simdscalar unpack(simdscalar &in) = delete;
     static simdscalar pack(simdscalar &in) = delete;
+#if ENABLE_AVX512_SIMD16
+    static simd16scalar loadSOA_16(const uint8_t *pSrc) = delete;
+    static void storeSOA(uint8_t *pDst, simd16scalar src) = delete;
+    static simd16scalar unpack(simd16scalar &in) = delete;
+    static simd16scalar pack(simd16scalar &in) = delete;
+#endif
 };

 //////////////////////////////////////////////////////////////////////////
@@ -54,9 +60,14 @@ struct PackTraits<0, false>
     static void storeSOA(uint8_t *pDst, simdscalar src) { return; }
     static simdscalar unpack(simdscalar &in) { return _simd_setzero_ps(); }
     static simdscalar pack(simdscalar &in) { return _simd_setzero_ps(); }
+#if ENABLE_AVX512_SIMD16
+    static simd16scalar loadSOA_16(const uint8_t *pSrc) { return _simd16_setzero_ps(); }
+    static void storeSOA(uint8_t *pDst, simd16scalar src) { return; }
+    static simd16scalar unpack(simd16scalar &in) { return _simd16_setzero_ps(); }
+    static simd16scalar pack(simd16scalar &in) { return _simd16_setzero_ps(); }
+#endif
 };

-
 //////////////////////////////////////////////////////////////////////////
 /// PackTraits - Helpers for packing / unpacking 8 bit unsigned channels
 //////////////////////////////////////////////////////////////////////////
@@ -123,7 +134,7 @@ struct PackTraits<8, false>

         result.lo = _mm256_cvtepu8_epi32(src);

-        result.hi = _mm256_cvtepu8_epi32(_mm_bsrli_si128(src, 8));
+        result.hi = _mm256_cvtepu8_epi32(_mm_srli_si128(src, 8));

         return _simd_castsi_ps(result);
 #endif
@@ -157,6 +168,55 @@ struct PackTraits<8, false>
 #error Unsupported vector width
 #endif
     }
+#if ENABLE_AVX512_SIMD16
+
+    static simd16scalar loadSOA_16(const uint8_t *pSrc)
+    {
+        simd16scalar result = _simd16_setzero_ps();
+        simdscalar resultlo = _simd_setzero_ps();
+
+        const __m128 src = _mm_load_ps(reinterpret_cast<const float *>(pSrc));
+
+        resultlo = _mm256_insertf128_ps(resultlo, src, 0);
+        result = _simd16_insert_ps(result, resultlo, 0);
+
+        return result;
+    }
+
+    static void storeSOA(uint8_t *pDst, simd16scalar src)
+    {
+        // store simd16 bytes
+        _mm_store_ps(reinterpret_cast<float *>(pDst), _mm256_castps256_ps128(_simd16_extract_ps(src, 0)));
+    }
+
+    static simd16scalar unpack(simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+
+        __m128i src = _mm_castps_si128(_mm256_castps256_ps128(_simd16_extract_ps(in, 0)));
+
+        result = _simd16_insert_si(result, _simd_cvtepu8_epi32(src), 0);
+        result = _simd16_insert_si(result, _simd_cvtepu8_epi32(_mm_srli_si128(src, 8)), 1);
+
+        return _simd16_castsi_ps(result);
+    }
+
+    static simd16scalar pack(simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+        simdscalari resultlo = _simd_setzero_si();
+
+        __m128i templo = _mm_packus_epi32(_mm256_castsi256_si128(_mm256_castps_si256(_simd16_extract_ps(in, 0))), _mm256_extractf128_si256(_mm256_castps_si256(_simd16_extract_ps(in, 0)), 1));
+        __m128i temphi = _mm_packus_epi32(_mm256_castsi256_si128(_mm256_castps_si256(_simd16_extract_ps(in, 1))), _mm256_extractf128_si256(_mm256_castps_si256(_simd16_extract_ps(in, 1)), 1));
+
+        __m128i temp = _mm_packus_epi16(templo, temphi);
+
+        resultlo = _mm256_inserti128_si256(resultlo, temp, 0);
+        result = _simd16_insert_si(result, resultlo, 0);
+
+        return _simd16_castsi_ps(result);
+    }
+#endif
 };

 //////////////////////////////////////////////////////////////////////////
@@ -226,7 +286,7 @@ struct PackTraits<8, true>

         result.lo = _mm256_cvtepu8_epi32(src);

-        result.hi = _mm256_cvtepu8_epi32(_mm_bsrli_si128(src, 8));
+        result.hi = _mm256_cvtepu8_epi32(_mm_srli_si128(src, 8));

         return _simd_castsi_ps(result);
 #endif
@@ -260,6 +320,55 @@ struct PackTraits<8, true>
 #error Unsupported vector width
 #endif
     }
+#if ENABLE_AVX512_SIMD16
+
+    static simd16scalar loadSOA_16(const uint8_t *pSrc)
+    {
+        simd16scalar result = _simd16_setzero_ps();
+        simdscalar resultlo = _simd_setzero_ps();
+
+        const __m128 src = _mm_load_ps(reinterpret_cast<const float *>(pSrc));
+
+        resultlo = _mm256_insertf128_ps(resultlo, src, 0);
+        result = _simd16_insert_ps(result, resultlo, 0);
+
+        return result;
+    }
+
+    static void storeSOA(uint8_t *pDst, simd16scalar src)
+    {
+        // store simd16 bytes
+        _mm_store_ps(reinterpret_cast<float *>(pDst), _mm256_castps256_ps128(_simd16_extract_ps(src, 0)));
+    }
+
+    static simd16scalar unpack(simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+
+        __m128i src = _mm_castps_si128(_mm256_castps256_ps128(_simd16_extract_ps(in, 0)));
+
+        result = _simd16_insert_si(result, _simd_cvtepu8_epi32(src), 0);
+        result = _simd16_insert_si(result, _simd_cvtepu8_epi32(_mm_srli_si128(src, 8)), 1);
+
+        return _simd16_castsi_ps(result);
+    }
+
+    static simd16scalar pack(simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+        simdscalari resultlo = _simd_setzero_si();
+
+        __m128i templo = _mm_packs_epi32(_mm256_castsi256_si128(_mm256_castps_si256(_simd16_extract_ps(in, 0))), _mm256_extractf128_si256(_mm256_castps_si256(_simd16_extract_ps(in, 0)), 1));
+        __m128i temphi = _mm_packs_epi32(_mm256_castsi256_si128(_mm256_castps_si256(_simd16_extract_ps(in, 1))), _mm256_extractf128_si256(_mm256_castps_si256(_simd16_extract_ps(in, 1)), 1));
+
+        __m128i temp = _mm_packs_epi16(templo, temphi);
+
+        resultlo = _mm256_inserti128_si256(resultlo, temp, 0);
+        result = _simd16_insert_si(result, resultlo, 0);
+
+        return _simd16_castsi_ps(result);
+    }
+#endif
 };

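The signed 8-bit variant differs from the unsigned one only in the saturation applied while narrowing: PackTraits<8, false> uses the _mm_packus_* chain (unsigned saturation), PackTraits<8, true> the _mm_packs_* chain (signed). The observable difference, for example:

    // 32-bit value 300 (does not fit in 8 bits):
    //   packus chain: saturates to 255 (0xFF)
    //   packs  chain: saturates to 127 (0x7F)
    // 32-bit value -5:
    //   packus chain: saturates to 0
    //   packs  chain: representable, stays -5 (0xFB)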
 //////////////////////////////////////////////////////////////////////////
@@ -360,6 +469,49 @@ struct PackTraits<16, false>
 #error Unsupported vector width
 #endif
     }
+#if ENABLE_AVX512_SIMD16
+
+    static simd16scalar loadSOA_16(const uint8_t *pSrc)
+    {
+        simd16scalar result = _simd16_setzero_ps();
+
+        simdscalar resultlo = _simd_load_ps(reinterpret_cast<const float *>(pSrc));
+
+        result = _simd16_insert_ps(result, resultlo, 0);
+
+        return result;
+    }
+
+    static void storeSOA(uint8_t *pDst, simd16scalar src)
+    {
+        _simd_store_ps(reinterpret_cast<float *>(pDst), _simd16_extract_ps(src, 0));
+    }
+
+    static simd16scalar unpack(simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+
+        result = _simd16_insert_si(result, _simd_cvtepu16_epi32(_mm256_extracti128_si256(_mm256_castps_si256(_simd16_extract_ps(in, 0)), 0)), 0);
+        result = _simd16_insert_si(result, _simd_cvtepu16_epi32(_mm256_extracti128_si256(_mm256_castps_si256(_simd16_extract_ps(in, 0)), 1)), 1);
+
+        return _simd16_castsi_ps(result);
+    }
+
+    static simd16scalar pack(simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+
+        simdscalari inlo = _simd_castps_si(_simd16_extract_ps(in, 0));
+        simdscalari inhi = _simd_castps_si(_simd16_extract_ps(in, 1));
+
+        simdscalari templo = _simd_permute2f128_si(inlo, inhi, 0x20);
+        simdscalari temphi = _simd_permute2f128_si(inlo, inhi, 0x31);
+
+        result = _simd16_insert_si(result, _simd_packus_epi32(templo, temphi), 0);
+
+        return _simd16_castsi_ps(result);
+    }
+#endif
 };

 //////////////////////////////////////////////////////////////////////////
@@ -461,6 +613,49 @@ struct PackTraits<16, true>
 #error Unsupported vector width
 #endif
     }
+#if ENABLE_AVX512_SIMD16
+
+    static simd16scalar loadSOA_16(const uint8_t *pSrc)
+    {
+        simd16scalar result = _simd16_setzero_ps();
+
+        simdscalar resultlo = _simd_load_ps(reinterpret_cast<const float *>(pSrc));
+
+        result = _simd16_insert_ps(result, resultlo, 0);
+
+        return result;
+    }
+
+    static void storeSOA(uint8_t *pDst, simd16scalar src)
+    {
+        _simd_store_ps(reinterpret_cast<float *>(pDst), _simd16_extract_ps(src, 0));
+    }
+
+    static simd16scalar unpack(simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+
+        result = _simd16_insert_si(result, _simd_cvtepu16_epi32(_mm256_extracti128_si256(_mm256_castps_si256(_simd16_extract_ps(in, 0)), 0)), 0);
+        result = _simd16_insert_si(result, _simd_cvtepu16_epi32(_mm256_extracti128_si256(_mm256_castps_si256(_simd16_extract_ps(in, 0)), 1)), 1);
+
+        return _simd16_castsi_ps(result);
+    }
+
+    static simd16scalar pack(simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+
+        simdscalari inlo = _simd_castps_si(_simd16_extract_ps(in, 0));
+        simdscalari inhi = _simd_castps_si(_simd16_extract_ps(in, 1));
+
+        simdscalari templo = _simd_permute2f128_si(inlo, inhi, 0x20);
+        simdscalari temphi = _simd_permute2f128_si(inlo, inhi, 0x31);
+
+        result = _simd16_insert_si(result, _simd_packus_epi32(templo, temphi), 0);
+
+        return _simd16_castsi_ps(result);
+    }
+#endif
 };

 //////////////////////////////////////////////////////////////////////////
@@ -475,6 +670,28 @@ struct PackTraits<32, false>
     static void storeSOA(uint8_t *pDst, simdscalar src) { _simd_store_ps((float*)pDst, src); }
     static simdscalar unpack(simdscalar &in) { return in; }
     static simdscalar pack(simdscalar &in) { return in; }
+#if ENABLE_AVX512_SIMD16
+
+    static simd16scalar loadSOA_16(const uint8_t *pSrc)
+    {
+        return _simd16_load_ps(reinterpret_cast<const float *>(pSrc));
+    }
+
+    static void storeSOA(uint8_t *pDst, simd16scalar src)
+    {
+        _simd16_store_ps(reinterpret_cast<float *>(pDst), src);
+    }
+
+    static simd16scalar unpack(simd16scalar &in)
+    {
+        return in;
+    }
+
+    static simd16scalar pack(simd16scalar &in)
+    {
+        return in;
+    }
+#endif
 };

 //////////////////////////////////////////////////////////////////////////
@@ -689,10 +906,10 @@ inline static __m128 powf_wrapper(__m128 Base, float Exp)
 {
     float *f = (float *)(&Base);

-    return _mm_set_ps(powf(f[0], Exp),
-                      powf(f[1], Exp),
+    return _mm_set_ps(powf(f[3], Exp),
                       powf(f[2], Exp),
-                      powf(f[3], Exp));
+                      powf(f[1], Exp),
+                      powf(f[0], Exp));
 }

 static inline __m128 ConvertFloatToSRGB2(__m128& Src)
@@ -756,6 +973,115 @@ static inline __m128 ConvertFloatToSRGB2(__m128& Src)
     return Result;
 }

+#if ENABLE_AVX512_SIMD16
+template< unsigned expnum, unsigned expden, unsigned coeffnum, unsigned coeffden >
+inline static simd16scalar fastpow(simd16scalar value)
+{
+    static const float factor1 = exp2(127.0f * expden / expnum - 127.0f)
+        * powf(1.0f * coeffnum / coeffden, 1.0f * expden / expnum);
+
+    // Apply a constant pre-correction factor.
+    simd16scalar result = _simd16_mul_ps(value, _simd16_set1_ps(factor1));
+
+    // Reinterpret arg as integer to obtain logarithm.
+    //asm("cvtdq2ps %1, %0" : "=x" (result) : "x" (result));
+    result = _simd16_cvtepi32_ps(_simd16_castps_si(result));
+
+    // Multiply logarithm by power.
+    result = _simd16_mul_ps(result, _simd16_set1_ps(1.0f * expnum / expden));
+
+    // Convert back to "integer" to exponentiate.
+    //asm("cvtps2dq %1, %0" : "=x" (result) : "x" (result));
+    result = _simd16_castsi_ps(_simd16_cvtps_epi32(result));
+
+    return result;
+}
+
+inline static simd16scalar pow512_4(simd16scalar arg)
+{
+    // 5/12 is too small, so compute the 4th root of 20/12 instead.
+    // 20/12 = 5/3 = 1 + 2/3 = 2 - 1/3.  2/3 is a suitable argument for fastpow.
+    // weighting coefficient: a^-1/2 = 2 a; a = 2^-2/3
+    simd16scalar xf = fastpow< 2, 3, int(0.629960524947437 * 1e9), int(1e9) >(arg);
+    simd16scalar xover = _simd16_mul_ps(arg, xf);
+
+    simd16scalar xfm1 = _simd16_rsqrt_ps(xf);
+    simd16scalar x2 = _simd16_mul_ps(arg, arg);
+    simd16scalar xunder = _simd16_mul_ps(x2, xfm1);
+
+    // sqrt2 * over + 2 * sqrt2 * under
+    simd16scalar xavg = _simd16_mul_ps(_simd16_set1_ps(1.0f / (3.0f * 0.629960524947437f) * 0.999852f), _simd16_add_ps(xover, xunder));
+
+    xavg = _simd16_mul_ps(xavg, _simd16_rsqrt_ps(xavg));
+    xavg = _simd16_mul_ps(xavg, _simd16_rsqrt_ps(xavg));
+
+    return xavg;
+}
+
+inline static simd16scalar powf_wrapper(const simd16scalar base, float exp)
+{
+    const float *f = reinterpret_cast<const float *>(&base);
+
+    return _simd16_set_ps(
+        powf(f[15], exp),
+        powf(f[14], exp),
+        powf(f[13], exp),
+        powf(f[12], exp),
+        powf(f[11], exp),
+        powf(f[10], exp),
+        powf(f[ 9], exp),
+        powf(f[ 8], exp),
+        powf(f[ 7], exp),
+        powf(f[ 6], exp),
+        powf(f[ 5], exp),
+        powf(f[ 4], exp),
+        powf(f[ 3], exp),
+        powf(f[ 2], exp),
+        powf(f[ 1], exp),
+        powf(f[ 0], exp)
+        );
+}
+
+// float to SRGB conversion formula
+//
+// if (value < 0.0031308f)
+//     value *= 12.92f;
+// else
+//     value = 1.055f * pow(value, 1.0f / 2.4f) - 0.055f;
+//
+static inline simd16scalar ConvertFloatToSRGB2(const simd16scalar &value)
+{
+    // create a mask where the source is < the minimal SRGB float value
+    const simd16mask mask = _simd16_cmplt_ps_mask(value, _simd16_set1_ps(0.0031308f));
+
+    // if all elements are < the threshold, result = value * 12.92
+    simd16scalar result = _simd16_mul_ps(value, _simd16_set1_ps(12.92f));
+
+    if (_simd16_mask2int(mask) != 0xFFFF)
+    {
+        // some elements are >= threshold, result = 1.055 * power(value, 1.0 / 2.4) - 0.055
+#if KNOB_USE_FAST_SRGB == TRUE
+        // 1.0f / 2.4f is 5.0f / 12.0f which is used for approximation.
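For reference, the per-channel mapping that both the __m128 and simd16 ConvertFloatToSRGB2 implementations compute is the piecewise curve quoted in the comment above; in scalar form (a reference sketch, not part of the patch):

    #include <cmath>

    // Scalar reference for the vectorized conversion above.
    static inline float FloatToSRGBRef(float value)
    {
        if (value < 0.0031308f)
        {
            return value * 12.92f;
        }

        return 1.055f * powf(value, 1.0f / 2.4f) - 0.055f;
    }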
+        simd16scalar result2 = pow512_4(value);
+#else
+        simd16scalar result2 = powf_wrapper(value, 1.0f / 2.4f);
+#endif
+
+        result2 = _simd16_mul_ps(result2, _simd16_set1_ps(1.055f));
+        result2 = _simd16_sub_ps(result2, _simd16_set1_ps(0.055f));
+
+#if (KNOB_ARCH == KNOB_ARCH_AVX512)
+        // only native AVX512 can directly use the computed mask for the blend operation
+        result = _mm512_mask_blend_ps(mask, result2, result);
+#else
+        result = _simd16_blendv_ps(result2, result, _simd16_cmplt_ps(value, _simd16_set1_ps(0.0031308f)));
+#endif
+    }
+
+    return result;
+}
+
+#endif
 //////////////////////////////////////////////////////////////////////////
 /// TypeTraits - Format type traits specialization for FLOAT16
 //////////////////////////////////////////////////////////////////////////
@@ -892,6 +1218,40 @@ simdscalari result;
         SWR_ASSERT(0); // @todo
         return _simd_setzero_ps();
     }
+#if ENABLE_AVX512_SIMD16
+
+    static simd16scalar pack(const simd16scalar &in)
+    {
+        simd16scalari result = _simd16_setzero_si();
+        simdscalari resultlo = _simd_setzero_si();
+
+#if (KNOB_ARCH == KNOB_ARCH_AVX)
+        simdscalar simdlo = pack(_simd16_extract_ps(in, 0));
+        simdscalar simdhi = pack(_simd16_extract_ps(in, 1));
+
+        __m128i templo = _mm256_extractf128_si256(_simd_castps_si(simdlo), 0);
+        __m128i temphi = _mm256_extractf128_si256(_simd_castps_si(simdhi), 0);
+
+#else
+        __m128i templo = _mm256_cvtps_ph(_simd16_extract_ps(in, 0), _MM_FROUND_TRUNC);
+        __m128i temphi = _mm256_cvtps_ph(_simd16_extract_ps(in, 1), _MM_FROUND_TRUNC);
+
+#endif
+        resultlo = _mm256_insertf128_si256(resultlo, templo, 0);
+        resultlo = _mm256_insertf128_si256(resultlo, temphi, 1);
+
+        result = _simd16_insert_si(result, resultlo, 0);
+
+        return _simd16_castsi_ps(result);
+    }
+
+    static simd16scalar unpack(const simd16scalar &in)
+    {
+        // input is 16 packed float16, output is 16 packed float32
+        SWR_ASSERT(0); // @todo
+        return _simd16_setzero_ps();
+    }
+#endif
 };

 //////////////////////////////////////////////////////////////////////////
@@ -937,6 +1297,13 @@ template<> struct TypeTraits<SWR_TYPE_FLOAT, 32> : PackTraits<32>
 #endif
         return in;
     }
+#if ENABLE_AVX512_SIMD16
+
+    static inline simd16scalar convertSrgb(simd16scalar &in)
+    {
+        return ConvertFloatToSRGB2(in);
+    }
+#endif
 };

 //////////////////////////////////////////////////////////////////////////
@@ -1265,4 +1632,95 @@ struct ComponentTraits
         SWR_ASSERT(0);
         return TypeTraits<X, NumBitsX>::convertSrgb(in);
     }
+#if ENABLE_AVX512_SIMD16
+
+    INLINE static simd16scalar loadSOA_16(uint32_t comp, const uint8_t* pSrc)
+    {
+        switch (comp)
+        {
+        case 0:
+            return TypeTraits<X, NumBitsX>::loadSOA_16(pSrc);
+        case 1:
+            return TypeTraits<Y, NumBitsY>::loadSOA_16(pSrc);
+        case 2:
+            return TypeTraits<Z, NumBitsZ>::loadSOA_16(pSrc);
+        case 3:
+            return TypeTraits<W, NumBitsW>::loadSOA_16(pSrc);
+        }
+        SWR_ASSERT(0);
+        return TypeTraits<X, NumBitsX>::loadSOA_16(pSrc);
+    }
+
+    INLINE static void storeSOA(uint32_t comp, uint8_t *pDst, simd16scalar src)
+    {
+        switch (comp)
+        {
+        case 0:
+            TypeTraits<X, NumBitsX>::storeSOA(pDst, src);
+            return;
+        case 1:
+            TypeTraits<Y, NumBitsY>::storeSOA(pDst, src);
+            return;
+        case 2:
+            TypeTraits<Z, NumBitsZ>::storeSOA(pDst, src);
+            return;
+        case 3:
+            TypeTraits<W, NumBitsW>::storeSOA(pDst, src);
+            return;
+        }
+        SWR_ASSERT(0);
+        TypeTraits<X, NumBitsX>::storeSOA(pDst, src);
+    }
+
+    INLINE static simd16scalar unpack(uint32_t comp, simd16scalar &in)
+    {
+        switch (comp)
+        {
+        case 0:
+            return TypeTraits<X, NumBitsX>::unpack(in);
+        case 1:
+            return TypeTraits<Y, NumBitsY>::unpack(in);
+        case 2:
+            return TypeTraits<Z, NumBitsZ>::unpack(in);
+        case 3:
+            return TypeTraits<W, NumBitsW>::unpack(in);
+        }
+        SWR_ASSERT(0);
+        return TypeTraits<X, NumBitsX>::unpack(in);
+    }
+
+    INLINE static simd16scalar pack(uint32_t comp, simd16scalar &in)
+    {
+        switch (comp)
+        {
+        case 0:
+            return TypeTraits<X, NumBitsX>::pack(in);
+        case 1:
+            return TypeTraits<Y, NumBitsY>::pack(in);
+        case 2:
+            return TypeTraits<Z, NumBitsZ>::pack(in);
+        case 3:
+            return TypeTraits<W, NumBitsW>::pack(in);
+        }
+        SWR_ASSERT(0);
+        return TypeTraits<X, NumBitsX>::pack(in);
+    }
+
+    INLINE static simd16scalar convertSrgb(uint32_t comp, simd16scalar &in)
+    {
+        switch (comp)
+        {
+        case 0:
+            return TypeTraits<X, NumBitsX>::convertSrgb(in);
+        case 1:
+            return TypeTraits<Y, NumBitsY>::convertSrgb(in);
+        case 2:
+            return TypeTraits<Z, NumBitsZ>::convertSrgb(in);
+        case 3:
+            return TypeTraits<W, NumBitsW>::convertSrgb(in);
+        }
+        SWR_ASSERT(0);
+        return TypeTraits<X, NumBitsX>::convertSrgb(in);
+    }
+#endif
 };
diff --git a/src/gallium/drivers/swr/rasterizer/core/knobs.h b/src/gallium/drivers/swr/rasterizer/core/knobs.h
index c01ad67f7c4..b1085268247 100644
--- a/src/gallium/drivers/swr/rasterizer/core/knobs.h
+++ b/src/gallium/drivers/swr/rasterizer/core/knobs.h
@@ -39,6 +39,7 @@
 ///////////////////////////////////////////////////////////////////////////////

 #define ENABLE_AVX512_SIMD16 0
+#define USE_8x2_TILE_BACKEND 0

 ///////////////////////////////////////////////////////////////////////////////
 // Architecture validation
@@ -145,7 +146,7 @@
 #endif

 #if ENABLE_AVX512_SIMD16
-#if KNOB_SIMD16_WIDTH==16 && KNOB_TILE_X_DIM < 4
+#if KNOB_SIMD16_WIDTH == 16 && KNOB_TILE_X_DIM < 8
 #error "incompatible width/tile dimensions"
 #endif
 #endif
@@ -162,8 +163,8 @@

 #if ENABLE_AVX512_SIMD16
 #if KNOB_SIMD16_WIDTH == 16
-#define SIMD16_TILE_X_DIM 4
-#define SIMD16_TILE_Y_DIM 4
+#define SIMD16_TILE_X_DIM 8
+#define SIMD16_TILE_Y_DIM 2
 #else
 #error "Invalid simd width"
 #endif
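The new simd16 tile shape keeps the raster tile at its existing KNOB_TILE_X_DIM x KNOB_TILE_Y_DIM size but walks it in 8x2-pixel slices, which is why the guard above now requires tiles at least 8 pixels wide. Illustrative arithmetic, assuming the default 8x8 tile:

    // one simd16 slice:           8 pixels wide x 2 rows = 16 pixels = 16 lanes
    // slices per 8x8 raster tile: (8 / SIMD16_TILE_X_DIM) * (8 / SIMD16_TILE_Y_DIM)
    //                           = (8 / 8) * (8 / 2) = 4
    // pixels written per _simd16_store_ps in the hot-tile clears below: 16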
+ float *pClearData = (float *)(pHotTile->clearData); + simd16scalar valZ = _simd16_broadcast_ss(&pClearData[0]); + + float *pfBuf = (float *)pHotTile->pBuffer; + uint32_t numSamples = pHotTile->numSamples; + + for (uint32_t row = 0; row < KNOB_MACROTILE_Y_DIM; row += KNOB_TILE_Y_DIM) + { + for (uint32_t col = 0; col < KNOB_MACROTILE_X_DIM; col += KNOB_TILE_X_DIM) + { + for (uint32_t si = 0; si < (KNOB_TILE_X_DIM * KNOB_TILE_Y_DIM * numSamples); si += SIMD16_TILE_X_DIM * SIMD16_TILE_Y_DIM) + { + _simd16_store_ps(pfBuf, valZ); + pfBuf += KNOB_SIMD16_WIDTH; + } + } + } +} + +void HotTileMgr::ClearStencilHotTile(const HOTTILE* pHotTile) +{ + // convert from F32 to U8. + uint8_t clearVal = (uint8_t)(pHotTile->clearData[0]); + //broadcast 32x into __m256i... + simd16scalari valS = _simd16_set1_epi8(clearVal); + + simd16scalari *pBuf = (simd16scalari *)pHotTile->pBuffer; + uint32_t numSamples = pHotTile->numSamples; + + for (uint32_t row = 0; row < KNOB_MACROTILE_Y_DIM; row += KNOB_TILE_Y_DIM) + { + for (uint32_t col = 0; col < KNOB_MACROTILE_X_DIM; col += KNOB_TILE_X_DIM) + { + // We're putting 4 pixels in each of the 32-bit slots, so increment 4 times as quickly. + for (uint32_t si = 0; si < (KNOB_TILE_X_DIM * KNOB_TILE_Y_DIM * numSamples); si += SIMD16_TILE_X_DIM * SIMD16_TILE_Y_DIM * 4) + { + _simd16_store_si(pBuf, valS); + pBuf += 1; + } + } + } +} + +#else void HotTileMgr::ClearColorHotTile(const HOTTILE* pHotTile) // clear a macro tile from float4 clear data. { // Load clear color into SIMD register... @@ -273,6 +355,7 @@ void HotTileMgr::ClearStencilHotTile(const HOTTILE* pHotTile) } } +#endif ////////////////////////////////////////////////////////////////////////// /// @brief InitializeHotTiles /// for draw calls, we initialize the active hot tiles and perform deferred diff --git a/src/gallium/drivers/swr/rasterizer/core/utils.h b/src/gallium/drivers/swr/rasterizer/core/utils.h index 96f061a78af..dd4fa3e74c0 100644 --- a/src/gallium/drivers/swr/rasterizer/core/utils.h +++ b/src/gallium/drivers/swr/rasterizer/core/utils.h @@ -245,6 +245,13 @@ struct TransposeSingleComponent { memcpy(pDst, pSrc, (bpp * KNOB_SIMD_WIDTH) / 8); } +#if ENABLE_AVX512_SIMD16 + + INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) + { + memcpy(pDst, pSrc, (bpp * KNOB_SIMD16_WIDTH) / 8); + } +#endif }; ////////////////////////////////////////////////////////////////////////// @@ -299,6 +306,27 @@ struct Transpose8_8_8_8 #error Unsupported vector width #endif } +#if ENABLE_AVX512_SIMD16 + + INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) + { + simd16scalari src = _simd16_load_si(reinterpret_cast(pSrc)); + + simd16scalari mask0 = _simd16_set_epi32(0x0f078080, 0x0e068080, 0x0d058080, 0x0c048080, 0x80800b03, 0x80800a02, 0x80800901, 0x80800800); + + simd16scalari dst01 = _simd16_shuffle_epi8(src, mask0); + + simd16scalari perm1 = _simd16_permute2f128_si(src, src, 1); + + simd16scalari mask1 = _simd16_set_epi32(0x80800f07, 0x80800e06, 0x80800d05, 0x80800c04, 0x0b038080, 0x0a028080, 0x09018080, 0x08008080); + + simd16scalari dst23 = _simd16_shuffle_epi8(perm1, mask1); + + simd16scalari dst = _simd16_or_si(dst01, dst23); + + _simd16_store_si(reinterpret_cast(pDst), dst); + } +#endif }; ////////////////////////////////////////////////////////////////////////// @@ -311,6 +339,10 @@ struct Transpose8_8_8 /// @param pSrc - source data in SOA form /// @param pDst - output data in AOS form INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete; +#if ENABLE_AVX512_SIMD16 + 
+ INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete; +#endif }; ////////////////////////////////////////////////////////////////////////// @@ -345,6 +377,21 @@ struct Transpose8_8 #error Unsupported vector width #endif } +#if ENABLE_AVX512_SIMD16 + + INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) + { + __m256i src = _mm256_load_si256(reinterpret_cast(pSrc)); // rrrrrrrrrrrrrrrrgggggggggggggggg + + __m256i r = _mm256_permute4x64_epi64(src, 0x50); // 0x50 = 01010000b // rrrrrrrrxxxxxxxxrrrrrrrrxxxxxxxx + + __m256i g = _mm256_permute4x64_epi64(src, 0xFA); // 0xFA = 11111010b // ggggggggxxxxxxxxggggggggxxxxxxxx + + __m256i dst = _mm256_unpacklo_epi8(r, g); // rgrgrgrgrgrgrgrgrgrgrgrgrgrgrgrg + + _mm256_store_si256(reinterpret_cast<__m256i *>(pDst), dst); + } +#endif }; ////////////////////////////////////////////////////////////////////////// @@ -409,6 +456,50 @@ struct Transpose32_32_32_32 #error Unsupported vector width #endif } +#if ENABLE_AVX512_SIMD16 + + INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) + { + simd16scalar src0 = _simd16_load_ps(reinterpret_cast(pSrc)); + simd16scalar src1 = _simd16_load_ps(reinterpret_cast(pSrc) + 16); + simd16scalar src2 = _simd16_load_ps(reinterpret_cast(pSrc) + 32); + simd16scalar src3 = _simd16_load_ps(reinterpret_cast(pSrc) + 48); + + __m128 vDst[8]; + + vTranspose4x8(vDst, _simd16_extract_ps(src0, 0), _simd16_extract_ps(src1, 0), _simd16_extract_ps(src2, 0), _simd16_extract_ps(src3, 0)); + +#if 1 + _simd16_store_ps(reinterpret_cast(pDst) + 0, reinterpret_cast(vDst)[0]); + _simd16_store_ps(reinterpret_cast(pDst) + 16, reinterpret_cast(vDst)[1]); +#else + _mm_store_ps(reinterpret_cast(pDst), vDst[0]); + _mm_store_ps(reinterpret_cast(pDst) + 4, vDst[1]); + _mm_store_ps(reinterpret_cast(pDst) + 8, vDst[2]); + _mm_store_ps(reinterpret_cast(pDst) + 12, vDst[3]); + _mm_store_ps(reinterpret_cast(pDst) + 16, vDst[4]); + _mm_store_ps(reinterpret_cast(pDst) + 20, vDst[5]); + _mm_store_ps(reinterpret_cast(pDst) + 24, vDst[6]); + _mm_store_ps(reinterpret_cast(pDst) + 28, vDst[7]); +#endif + + vTranspose4x8(vDst, _simd16_extract_ps(src0, 1), _simd16_extract_ps(src1, 1), _simd16_extract_ps(src2, 1), _simd16_extract_ps(src3, 1)); + +#if 1 + _simd16_store_ps(reinterpret_cast(pDst) + 32, reinterpret_cast(vDst)[2]); + _simd16_store_ps(reinterpret_cast(pDst) + 48, reinterpret_cast(vDst)[3]); +#else + _mm_store_ps(reinterpret_cast(pDst) + 32, vDst[0]); + _mm_store_ps(reinterpret_cast(pDst) + 36, vDst[1]); + _mm_store_ps(reinterpret_cast(pDst) + 40, vDst[2]); + _mm_store_ps(reinterpret_cast(pDst) + 44, vDst[3]); + _mm_store_ps(reinterpret_cast(pDst) + 48, vDst[4]); + _mm_store_ps(reinterpret_cast(pDst) + 52, vDst[5]); + _mm_store_ps(reinterpret_cast(pDst) + 56, vDst[6]); + _mm_store_ps(reinterpret_cast(pDst) + 60, vDst[7]); +#endif + } +#endif }; ////////////////////////////////////////////////////////////////////////// @@ -471,6 +562,49 @@ struct Transpose32_32_32 #error Unsupported vector width #endif } +#if ENABLE_AVX512_SIMD16 + + INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) + { + simd16scalar src0 = _simd16_load_ps(reinterpret_cast(pSrc)); + simd16scalar src1 = _simd16_load_ps(reinterpret_cast(pSrc) + 16); + simd16scalar src2 = _simd16_load_ps(reinterpret_cast(pSrc) + 32); + + __m128 vDst[8]; + + vTranspose3x8(vDst, _simd16_extract_ps(src0, 0), _simd16_extract_ps(src1, 0), _simd16_extract_ps(src2, 0)); + +#if 1 + _simd16_store_ps(reinterpret_cast(pDst) + 0, 
reinterpret_cast(vDst)[0]); + _simd16_store_ps(reinterpret_cast(pDst) + 16, reinterpret_cast(vDst)[1]); +#else + _mm_store_ps(reinterpret_cast(pDst), vDst[0]); + _mm_store_ps(reinterpret_cast(pDst) + 4, vDst[1]); + _mm_store_ps(reinterpret_cast(pDst) + 8, vDst[2]); + _mm_store_ps(reinterpret_cast(pDst) + 12, vDst[3]); + _mm_store_ps(reinterpret_cast(pDst) + 16, vDst[4]); + _mm_store_ps(reinterpret_cast(pDst) + 20, vDst[5]); + _mm_store_ps(reinterpret_cast(pDst) + 24, vDst[6]); + _mm_store_ps(reinterpret_cast(pDst) + 28, vDst[7]); +#endif + + vTranspose3x8(vDst, _simd16_extract_ps(src0, 1), _simd16_extract_ps(src1, 1), _simd16_extract_ps(src2, 1)); + +#if 1 + _simd16_store_ps(reinterpret_cast(pDst) + 32, reinterpret_cast(vDst)[2]); + _simd16_store_ps(reinterpret_cast(pDst) + 48, reinterpret_cast(vDst)[3]); +#else + _mm_store_ps(reinterpret_cast(pDst) + 32, vDst[0]); + _mm_store_ps(reinterpret_cast(pDst) + 36, vDst[1]); + _mm_store_ps(reinterpret_cast(pDst) + 40, vDst[2]); + _mm_store_ps(reinterpret_cast(pDst) + 44, vDst[3]); + _mm_store_ps(reinterpret_cast(pDst) + 48, vDst[4]); + _mm_store_ps(reinterpret_cast(pDst) + 52, vDst[5]); + _mm_store_ps(reinterpret_cast(pDst) + 56, vDst[6]); + _mm_store_ps(reinterpret_cast(pDst) + 60, vDst[7]); +#endif + } +#endif }; ////////////////////////////////////////////////////////////////////////// @@ -522,6 +656,30 @@ struct Transpose32_32 #error Unsupported vector width #endif } +#if ENABLE_AVX512_SIMD16 + + INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) + { + const float *pfSrc = reinterpret_cast(pSrc); + + __m256 src_r0 = _mm256_load_ps(pfSrc + 0); + __m256 src_r1 = _mm256_load_ps(pfSrc + 8); + __m256 src_g0 = _mm256_load_ps(pfSrc + 16); + __m256 src_g1 = _mm256_load_ps(pfSrc + 24); + + __m256 dst0 = _mm256_unpacklo_ps(src_r0, src_g0); + __m256 dst1 = _mm256_unpackhi_ps(src_r0, src_g0); + __m256 dst2 = _mm256_unpacklo_ps(src_r1, src_g1); + __m256 dst3 = _mm256_unpackhi_ps(src_r1, src_g1); + + float *pfDst = reinterpret_cast(pDst); + + _mm256_store_ps(pfDst + 0, dst0); + _mm256_store_ps(pfDst + 8, dst1); + _mm256_store_ps(pfDst + 16, dst2); + _mm256_store_ps(pfDst + 24, dst3); + } +#endif }; ////////////////////////////////////////////////////////////////////////// @@ -587,6 +745,34 @@ struct Transpose16_16_16_16 #error Unsupported vector width #endif } +#if ENABLE_AVX512_SIMD16 + + INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) + { + simd16scalari src_rg = _simd16_load_si(reinterpret_cast(pSrc)); + simd16scalari src_ba = _simd16_load_si(reinterpret_cast(pSrc + sizeof(simd16scalari))); + + __m256i src_r = _simd16_extract_si(src_rg, 0); + __m256i src_g = _simd16_extract_si(src_rg, 1); + __m256i src_b = _simd16_extract_si(src_ba, 0); + __m256i src_a = _simd16_extract_si(src_ba, 1); + + __m256i rg0 = _mm256_unpacklo_epi16(src_r, src_g); + __m256i rg1 = _mm256_unpackhi_epi16(src_r, src_g); + __m256i ba0 = _mm256_unpacklo_epi16(src_b, src_a); + __m256i ba1 = _mm256_unpackhi_epi16(src_b, src_a); + + __m256i dst0 = _mm256_unpacklo_epi32(rg0, ba0); + __m256i dst1 = _mm256_unpackhi_epi32(rg0, ba0); + __m256i dst2 = _mm256_unpacklo_epi32(rg1, ba1); + __m256i dst3 = _mm256_unpackhi_epi32(rg1, ba1); + + _mm256_store_si256(reinterpret_cast<__m256i*>(pDst) + 0, dst0); + _mm256_store_si256(reinterpret_cast<__m256i*>(pDst) + 1, dst1); + _mm256_store_si256(reinterpret_cast<__m256i*>(pDst) + 2, dst2); + _mm256_store_si256(reinterpret_cast<__m256i*>(pDst) + 3, dst3); + } +#endif }; 
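For reference, the Transpose_16 routines above depend on the AVX2 unpack instructions interleaving within each 128-bit lane rather than across the whole register. The following minimal sketch is hypothetical test code, not part of this patch (it assumes an AVX2-capable compiler and host), and simply prints what _mm256_unpacklo_epi16/_mm256_unpackhi_epi16 produce:

    // unpack_demo.cpp: shows the per-128-bit-lane pairing that the
    // 16-bit transposes above rely on. Build with e.g. -mavx2.
    #include <immintrin.h>
    #include <cstdint>
    #include <cstdio>

    static void print16(const char *name, __m256i v)
    {
        alignas(32) uint16_t e[16];
        _mm256_store_si256(reinterpret_cast<__m256i *>(e), v);
        printf("%s:", name);
        for (int i = 0; i < 16; ++i)
            printf(" %3d", (int)e[i]);
        printf("\n");
    }

    int main()
    {
        // r = 0..15 and g = 100..115 stand in for 16-bit red/green channels
        alignas(32) uint16_t r[16], g[16];
        for (uint16_t i = 0; i < 16; ++i)
        {
            r[i] = i;
            g[i] = 100 + i;
        }

        __m256i vr = _mm256_load_si256(reinterpret_cast<const __m256i *>(r));
        __m256i vg = _mm256_load_si256(reinterpret_cast<const __m256i *>(g));

        // lane 0 interleaves elements 0..3, lane 1 elements 8..11:
        //   0 100 1 101 2 102 3 103 | 8 108 9 109 10 110 11 111
        print16("unpacklo", _mm256_unpacklo_epi16(vr, vg));
        //   4 104 5 105 6 106 7 107 | 12 112 13 113 14 114 15 115
        print16("unpackhi", _mm256_unpackhi_epi16(vr, vg));

        return 0;
    }

This lane-local pairing is why Transpose16_16_16_16::Transpose_16 can build its rg/ba pairs with no cross-lane shuffles beyond the initial extracts.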
 //////////////////////////////////////////////////////////////////////////
@@ -650,6 +836,33 @@ struct Transpose16_16_16
 #error Unsupported vector width
 #endif
     }
+#if ENABLE_AVX512_SIMD16
+
+    INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+    {
+        simd16scalari src_rg = _simd16_load_si(reinterpret_cast<const simd16scalari *>(pSrc));
+
+        __m256i src_r = _simd16_extract_si(src_rg, 0);
+        __m256i src_g = _simd16_extract_si(src_rg, 1);
+        __m256i src_b = _mm256_load_si256(reinterpret_cast<const __m256i *>(pSrc + sizeof(simd16scalari)));
+        __m256i src_a = _mm256_undefined_si256();
+
+        __m256i rg0 = _mm256_unpacklo_epi16(src_r, src_g);
+        __m256i rg1 = _mm256_unpackhi_epi16(src_r, src_g);
+        __m256i ba0 = _mm256_unpacklo_epi16(src_b, src_a);
+        __m256i ba1 = _mm256_unpackhi_epi16(src_b, src_a);
+
+        __m256i dst0 = _mm256_unpacklo_epi32(rg0, ba0);
+        __m256i dst1 = _mm256_unpackhi_epi32(rg0, ba0);
+        __m256i dst2 = _mm256_unpacklo_epi32(rg1, ba1);
+        __m256i dst3 = _mm256_unpackhi_epi32(rg1, ba1);
+
+        _mm256_store_si256(reinterpret_cast<__m256i*>(pDst) + 0, dst0);
+        _mm256_store_si256(reinterpret_cast<__m256i*>(pDst) + 1, dst1);
+        _mm256_store_si256(reinterpret_cast<__m256i*>(pDst) + 2, dst2);
+        _mm256_store_si256(reinterpret_cast<__m256i*>(pDst) + 3, dst3);
+    }
+#endif
 };
 
 //////////////////////////////////////////////////////////////////////////
@@ -692,6 +905,23 @@ struct Transpose16_16
 #error Unsupported vector width
 #endif
     }
+#if ENABLE_AVX512_SIMD16
+
+    INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+    {
+        simd16scalari result = _simd16_setzero_si();
+
+        simd16scalari src = _simd16_castps_si(_simd16_load_ps(reinterpret_cast<const float *>(pSrc)));
+
+        simdscalari srclo = _simd16_extract_si(src, 0);
+        simdscalari srchi = _simd16_extract_si(src, 1);
+
+        result = _simd16_insert_si(result, _mm256_unpacklo_epi16(srclo, srchi), 0);
+        result = _simd16_insert_si(result, _mm256_unpackhi_epi16(srclo, srchi), 1);
+
+        _simd16_store_si(reinterpret_cast<simd16scalari *>(pDst), result);
+    }
+#endif
 };
 
 //////////////////////////////////////////////////////////////////////////
@@ -704,6 +934,10 @@ struct Transpose24_8
     /// @param pSrc - source data in SOA form
     /// @param pDst - output data in AOS form
     static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#if ENABLE_AVX512_SIMD16
+
+    static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#endif
 };
 
 //////////////////////////////////////////////////////////////////////////
@@ -716,9 +950,11 @@ struct Transpose32_8_24
     /// @param pSrc - source data in SOA form
     /// @param pDst - output data in AOS form
     static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-};
-
+#if ENABLE_AVX512_SIMD16
 
+    static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#endif
+};
 
 //////////////////////////////////////////////////////////////////////////
 /// Transpose4_4_4_4
@@ -730,6 +966,10 @@ struct Transpose4_4_4_4
     /// @param pSrc - source data in SOA form
     /// @param pDst - output data in AOS form
     static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#if ENABLE_AVX512_SIMD16
+
+    static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#endif
 };
 
 //////////////////////////////////////////////////////////////////////////
@@ -742,6 +982,10 @@ struct Transpose5_6_5
     /// @param pSrc - source data in SOA form
     /// @param pDst - output data in AOS form
     static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#if ENABLE_AVX512_SIMD16
+
+    static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#endif
 };
 
 //////////////////////////////////////////////////////////////////////////
@@ -754,6 +998,10 @@ struct Transpose9_9_9_5
     /// @param pSrc - source data in SOA form
     /// @param pDst - output data in AOS form
     static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#if ENABLE_AVX512_SIMD16
+
+    static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#endif
 };
 
 //////////////////////////////////////////////////////////////////////////
@@ -766,6 +1014,10 @@ struct Transpose5_5_5_1
     /// @param pSrc - source data in SOA form
     /// @param pDst - output data in AOS form
     static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#if ENABLE_AVX512_SIMD16
+
+    static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#endif
 };
 
 //////////////////////////////////////////////////////////////////////////
@@ -790,6 +1042,10 @@ struct Transpose10_10_10_2
     /// @param pSrc - source data in SOA form
     /// @param pDst - output data in AOS form
     static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#if ENABLE_AVX512_SIMD16
+
+    static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#endif
 };
 
 //////////////////////////////////////////////////////////////////////////
@@ -802,6 +1058,10 @@ struct Transpose11_11_10
     /// @param pSrc - source data in SOA form
     /// @param pDst - output data in AOS form
     static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#if ENABLE_AVX512_SIMD16
+
+    static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
+#endif
 };
 
 // helper function to unroll loops
diff --git a/src/gallium/drivers/swr/rasterizer/memory/LoadTile.h b/src/gallium/drivers/swr/rasterizer/memory/LoadTile.h
index a0ac3fec07e..858f16266e9 100644
--- a/src/gallium/drivers/swr/rasterizer/memory/LoadTile.h
+++ b/src/gallium/drivers/swr/rasterizer/memory/LoadTile.h
@@ -67,6 +67,21 @@ struct LoadRasterTile
         uint32_t x, uint32_t y,
         uint8_t* pDst)
     {
+#if USE_8x2_TILE_BACKEND
+        typedef SimdTile_16<DstFormat, SrcFormat> SimdT;
+
+        SimdT* pDstSimdTiles = (SimdT*)pDst;
+
+        // Compute which simd tile we're accessing within 8x8 tile.
+        //   i.e. Compute linear simd tile coordinate given (x, y) in pixel coordinates.
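+        // For example, with the 8x2 backend knobs above (KNOB_TILE_X_DIM == 8,
+        // SIMD16_TILE_X_DIM == 8, SIMD16_TILE_Y_DIM == 2), a pixel at (x, y) == (5, 3):
+        //   simdIndex  = (3 / 2) * (8 / 8) + (5 / 8) = 1
+        //   simdOffset = (3 % 2) * 8 + (5 % 8)       = 13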
+        uint32_t simdIndex = (y / SIMD16_TILE_Y_DIM) * (KNOB_TILE_X_DIM / SIMD16_TILE_X_DIM) + (x / SIMD16_TILE_X_DIM);
+
+        SimdT* pSimdTile = &pDstSimdTiles[simdIndex];
+
+        uint32_t simdOffset = (y % SIMD16_TILE_Y_DIM) * SIMD16_TILE_X_DIM + (x % SIMD16_TILE_X_DIM);
+
+        pSimdTile->SetSwizzledColor(simdOffset, srcColor);
+#else
         typedef SimdTile<DstFormat, SrcFormat> SimdT;
 
         SimdT* pDstSimdTiles = (SimdT*)pDst;
@@ -80,6 +95,7 @@ struct LoadRasterTile
         uint32_t simdOffset = (y % SIMD_TILE_Y_DIM) * SIMD_TILE_X_DIM + (x % SIMD_TILE_X_DIM);
 
         pSimdTile->SetSwizzledColor(simdOffset, srcColor);
+#endif
     }
 
 //////////////////////////////////////////////////////////////////////////
diff --git a/src/gallium/drivers/swr/rasterizer/memory/StoreTile.h b/src/gallium/drivers/swr/rasterizer/memory/StoreTile.h
index 95a1adbc8db..21ee443841c 100644
--- a/src/gallium/drivers/swr/rasterizer/memory/StoreTile.h
+++ b/src/gallium/drivers/swr/rasterizer/memory/StoreTile.h
@@ -158,6 +158,25 @@ struct StorePixels<32, 2>
     }
 };
 
+#if USE_8x2_TILE_BACKEND
+template <>
+struct StorePixels<32, 4>
+{
+    static void Store(const uint8_t* pSrc, uint8_t* (&ppDsts)[4])
+    {
+        __m128i quad0 = _mm_load_si128(&reinterpret_cast<const __m128i *>(pSrc)[0]);
+        __m128i quad1 = _mm_load_si128(&reinterpret_cast<const __m128i *>(pSrc)[1]);
+        __m128i quad2 = _mm_load_si128(&reinterpret_cast<const __m128i *>(pSrc)[2]);
+        __m128i quad3 = _mm_load_si128(&reinterpret_cast<const __m128i *>(pSrc)[3]);
+
+        _mm_storeu_si128(reinterpret_cast<__m128i *>(ppDsts[0]), _mm_unpacklo_epi64(quad0, quad1));
+        _mm_storeu_si128(reinterpret_cast<__m128i *>(ppDsts[1]), _mm_unpackhi_epi64(quad0, quad1));
+        _mm_storeu_si128(reinterpret_cast<__m128i *>(ppDsts[2]), _mm_unpacklo_epi64(quad2, quad3));
+        _mm_storeu_si128(reinterpret_cast<__m128i *>(ppDsts[3]), _mm_unpackhi_epi64(quad2, quad3));
+    }
+};
+
+#endif
 //////////////////////////////////////////////////////////////////////////
 /// StorePixels (32-bit pixel specialization)
 /// @brief Stores a 4x2 (AVX) raster-tile to two rows.
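For reference, a scalar model of the StorePixels<32, 4>::Store specialization above; this is a hypothetical illustration, not patch code. Each _mm_unpack{lo,hi}_epi64 pairs two 64-bit pixel pairs, so the four 16-byte destination rows receive the sixteen source pixels in the order shown:

    #include <cstdint>
    #include <cstring>

    // Scalar equivalent of StorePixels<32, 4>::Store: 32-bit pixel
    // order[d][i] of the AOS source lands in destination d at slot i.
    static void StoreScalar32_4(const uint8_t* pSrc, uint8_t* (&ppDsts)[4])
    {
        const uint32_t* pixels = reinterpret_cast<const uint32_t*>(pSrc);

        static const uint32_t order[4][4] = {
            {  0,  1,  4,  5 },   // _mm_unpacklo_epi64(quad0, quad1) -> ppDsts[0]
            {  2,  3,  6,  7 },   // _mm_unpackhi_epi64(quad0, quad1) -> ppDsts[1]
            {  8,  9, 12, 13 },   // _mm_unpacklo_epi64(quad2, quad3) -> ppDsts[2]
            { 10, 11, 14, 15 },   // _mm_unpackhi_epi64(quad2, quad3) -> ppDsts[3]
        };

        for (uint32_t d = 0; d < 4; ++d)
        {
            for (uint32_t i = 0; i < 4; ++i)
            {
                memcpy(ppDsts[d] + i * sizeof(uint32_t), &pixels[order[d][i]], sizeof(uint32_t));
            }
        }
    }

Note how rows 0 and 2 together hold the top screen row of the 8x2 pattern (0 1 4 5 8 9 C D) and rows 1 and 3 the bottom row (2 3 6 7 A B E F).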
@@ -228,6 +247,21 @@ struct ConvertPixelsSOAtoAOS
     template <size_t NumDests>
     INLINE static void Convert(const uint8_t* pSrc, uint8_t* (&ppDsts)[NumDests])
     {
+#if USE_8x2_TILE_BACKEND
+        static const uint32_t MAX_RASTER_TILE_BYTES = 16 * 16; // 16 pixels * 16 bytes per pixel
+
+        OSALIGNSIMD16(uint8_t) soaTile[MAX_RASTER_TILE_BYTES];
+        OSALIGNSIMD16(uint8_t) aosTile[MAX_RASTER_TILE_BYTES];
+
+        // Convert from SrcFormat --> DstFormat
+        simd16vector src;
+        LoadSOA<SrcFormat>(pSrc, src);
+        StoreSOA<DstFormat>(src, soaTile);
+
+        // Convert from SOA --> AOS
+        FormatTraits<DstFormat>::TransposeT::Transpose_16(soaTile, aosTile);
+
+#else
         static const uint32_t MAX_RASTER_TILE_BYTES = 128; // 8 pixels * 16 bytes per pixel
 
         OSALIGNSIMD(uint8_t) soaTile[MAX_RASTER_TILE_BYTES];
@@ -241,6 +275,7 @@ struct ConvertPixelsSOAtoAOS
 
         // Convert from SOA --> AOS
         FormatTraits<DstFormat>::TransposeT::Transpose(soaTile, aosTile);
+#endif
 
         // Store data into destination
         StorePixels<FormatTraits<DstFormat>::bpp, NumDests>::Store(aosTile, ppDsts);
     }
@@ -261,6 +296,15 @@ struct ConvertPixelsSOAtoAOS
     template <size_t NumDests>
     INLINE static void Convert(const uint8_t* pSrc, uint8_t* (&ppDsts)[NumDests])
     {
+#if USE_8x2_TILE_BACKEND
+        static const uint32_t MAX_RASTER_TILE_BYTES = 16 * 16; // 16 pixels * 16 bytes per pixel
+
+        OSALIGNSIMD16(uint8_t) aosTile[MAX_RASTER_TILE_BYTES];
+
+        // Convert from SOA --> AOS
+        FormatTraits<DstFormat>::TransposeT::Transpose_16(pSrc, aosTile);
+
+#else
         static const uint32_t MAX_RASTER_TILE_BYTES = 128; // 8 pixels * 16 bytes per pixel
 
         OSALIGNSIMD(uint8_t) aosTile[MAX_RASTER_TILE_BYTES];
@@ -268,6 +312,7 @@ struct ConvertPixelsSOAtoAOS
 
         // Convert from SOA --> AOS
         FormatTraits<DstFormat>::TransposeT::Transpose(pSrc, aosTile);
+#endif
 
         // Store data into destination
         StorePixels<FormatTraits<DstFormat>::bpp, NumDests>::Store(aosTile, ppDsts);
     }
@@ -348,6 +393,73 @@ struct ConvertPixelsSOAtoAOS
     template <size_t NumDests>
     INLINE static void Convert(const uint8_t* pSrc, uint8_t* (&ppDsts)[NumDests])
     {
+#if USE_8x2_TILE_BACKEND
+        static const uint32_t MAX_RASTER_TILE_BYTES = 16 * 4; // 16 pixels * 4 bytes per pixel
+
+        OSALIGNSIMD16(uint8_t) soaTile[MAX_RASTER_TILE_BYTES];
+        OSALIGNSIMD16(uint8_t) aosTile[MAX_RASTER_TILE_BYTES];
+
+        // Convert from SrcFormat --> DstFormat
+        simd16vector src;
+        LoadSOA<SrcFormat>(pSrc, src);
+        StoreSOA<DstFormat>(src, soaTile);
+
+        // Convert from SOA --> AOS
+        FormatTraits<DstFormat>::TransposeT::Transpose_16(soaTile, aosTile);
+
+        // Store data into destination but don't overwrite the X8 bits
+        // Each 4-pixel row is 16-bytes
+#if 1
+        simdscalari loadlo = _simd_load_si(reinterpret_cast<const simdscalari *>(aosTile));
+        simdscalari loadhi = _simd_load_si(reinterpret_cast<const simdscalari *>(aosTile + sizeof(simdscalari)));
+
+        simdscalari templo = _simd_unpacklo_epi64(loadlo, loadhi);
+        simdscalari temphi = _simd_unpackhi_epi64(loadlo, loadhi);
+
+        simdscalari destlo = _mm256_loadu2_m128i(reinterpret_cast<__m128i *>(ppDsts[1]), reinterpret_cast<__m128i *>(ppDsts[0]));
+        simdscalari desthi = _mm256_loadu2_m128i(reinterpret_cast<__m128i *>(ppDsts[3]), reinterpret_cast<__m128i *>(ppDsts[2]));
+
+        simdscalari mask = _simd_set1_epi32(0xFFFFFF);
+
+        destlo = _simd_or_si(_simd_andnot_si(mask, destlo), _simd_and_si(mask, templo));
+        desthi = _simd_or_si(_simd_andnot_si(mask, desthi), _simd_and_si(mask, temphi));
+
+        _mm256_storeu2_m128i(reinterpret_cast<__m128i *>(ppDsts[1]), reinterpret_cast<__m128i *>(ppDsts[0]), destlo);
+        _mm256_storeu2_m128i(reinterpret_cast<__m128i *>(ppDsts[3]), reinterpret_cast<__m128i *>(ppDsts[2]), desthi);
+#else
+        __m128i *pZRow01 = (__m128i*)aosTile;
+        __m128i vQuad00 = _mm_load_si128(pZRow01);
+        __m128i vQuad01 = _mm_load_si128(pZRow01 + 1);
+        __m128i vQuad02 = _mm_load_si128(pZRow01 + 2);
+        __m128i vQuad03 = _mm_load_si128(pZRow01 + 3);
+
+        __m128i vRow00 = _mm_unpacklo_epi64(vQuad00, vQuad01);
+        __m128i vRow10 = _mm_unpackhi_epi64(vQuad00, vQuad01);
+        __m128i vRow20 = _mm_unpacklo_epi64(vQuad02, vQuad03);
+        __m128i vRow30 = _mm_unpackhi_epi64(vQuad02, vQuad03);
+
+        __m128i vDst0 = _mm_loadu_si128((const __m128i*)ppDsts[0]);
+        __m128i vDst1 = _mm_loadu_si128((const __m128i*)ppDsts[1]);
+        __m128i vDst2 = _mm_loadu_si128((const __m128i*)ppDsts[2]);
+        __m128i vDst3 = _mm_loadu_si128((const __m128i*)ppDsts[3]);
+
+        __m128i vMask = _mm_set1_epi32(0xFFFFFF);
+
+        vDst0 = _mm_andnot_si128(vMask, vDst0);
+        vDst0 = _mm_or_si128(vDst0, _mm_and_si128(vRow00, vMask));
+        vDst1 = _mm_andnot_si128(vMask, vDst1);
+        vDst1 = _mm_or_si128(vDst1, _mm_and_si128(vRow10, vMask));
+        vDst2 = _mm_andnot_si128(vMask, vDst2);
+        vDst2 = _mm_or_si128(vDst2, _mm_and_si128(vRow20, vMask));
+        vDst3 = _mm_andnot_si128(vMask, vDst3);
+        vDst3 = _mm_or_si128(vDst3, _mm_and_si128(vRow30, vMask));
+
+        _mm_storeu_si128((__m128i*)ppDsts[0], vDst0);
+        _mm_storeu_si128((__m128i*)ppDsts[1], vDst1);
+        _mm_storeu_si128((__m128i*)ppDsts[2], vDst2);
+        _mm_storeu_si128((__m128i*)ppDsts[3], vDst3);
+#endif
+#else
         static const uint32_t MAX_RASTER_TILE_BYTES = 128; // 8 pixels * 16 bytes per pixel
 
         OSALIGNSIMD(uint8_t) soaTile[MAX_RASTER_TILE_BYTES];
@@ -382,9 +494,138 @@ struct ConvertPixelsSOAtoAOS
 
         _mm_storeu_si128((__m128i*)ppDsts[0], vDst0);
         _mm_storeu_si128((__m128i*)ppDsts[1], vDst1);
+#endif
     }
 };
 
+#if USE_8x2_TILE_BACKEND
+template <SWR_FORMAT DstFormat>
+INLINE static void FlatConvert(const uint8_t* pSrc, uint8_t* pDst0, uint8_t* pDst1, uint8_t* pDst2, uint8_t* pDst3)
+{
+    // swizzle rgba -> bgra while we load
+    simd16scalar comp0 = _simd16_load_ps(reinterpret_cast<const float *>(pSrc + FormatTraits<DstFormat>::swizzle(0) * sizeof(simd16scalar))); // float32 rrrrrrrrrrrrrrrr
+    simd16scalar comp1 = _simd16_load_ps(reinterpret_cast<const float *>(pSrc + FormatTraits<DstFormat>::swizzle(1) * sizeof(simd16scalar))); // float32 gggggggggggggggg
+    simd16scalar comp2 = _simd16_load_ps(reinterpret_cast<const float *>(pSrc + FormatTraits<DstFormat>::swizzle(2) * sizeof(simd16scalar))); // float32 bbbbbbbbbbbbbbbb
+    simd16scalar comp3 = _simd16_load_ps(reinterpret_cast<const float *>(pSrc + FormatTraits<DstFormat>::swizzle(3) * sizeof(simd16scalar))); // float32 aaaaaaaaaaaaaaaa
+
+    // clamp
+    const simd16scalar zero = _simd16_setzero_ps();
+    const simd16scalar ones = _simd16_set1_ps(1.0f);
+
+    comp0 = _simd16_max_ps(comp0, zero);
+    comp0 = _simd16_min_ps(comp0, ones);
+
+    comp1 = _simd16_max_ps(comp1, zero);
+    comp1 = _simd16_min_ps(comp1, ones);
+
+    comp2 = _simd16_max_ps(comp2, zero);
+    comp2 = _simd16_min_ps(comp2, ones);
+
+    comp3 = _simd16_max_ps(comp3, zero);
+    comp3 = _simd16_min_ps(comp3, ones);
+
+    if (FormatTraits<DstFormat>::isSRGB)
+    {
+        // Gamma-correct only rgb
+        comp0 = FormatTraits<DstFormat>::convertSrgb(0, comp0);
+        comp1 = FormatTraits<DstFormat>::convertSrgb(1, comp1);
+        comp2 = FormatTraits<DstFormat>::convertSrgb(2, comp2);
+    }
+
+    // convert float components from 0.0f .. 1.0f to correct scale for 0 .. 255 dest format
+    comp0 = _simd16_mul_ps(comp0, _simd16_set1_ps(FormatTraits<DstFormat>::fromFloat(0)));
+    comp1 = _simd16_mul_ps(comp1, _simd16_set1_ps(FormatTraits<DstFormat>::fromFloat(1)));
+    comp2 = _simd16_mul_ps(comp2, _simd16_set1_ps(FormatTraits<DstFormat>::fromFloat(2)));
+    comp3 = _simd16_mul_ps(comp3, _simd16_set1_ps(FormatTraits<DstFormat>::fromFloat(3)));
+
+    // moving to 16 wide integer vector types
+    simd16scalari src0 = _simd16_cvtps_epi32(comp0); // padded byte rrrrrrrrrrrrrrrr
+    simd16scalari src1 = _simd16_cvtps_epi32(comp1); // padded byte gggggggggggggggg
+    simd16scalari src2 = _simd16_cvtps_epi32(comp2); // padded byte bbbbbbbbbbbbbbbb
+    simd16scalari src3 = _simd16_cvtps_epi32(comp3); // padded byte aaaaaaaaaaaaaaaa
+
+#if 1
+    // SOA to AOS conversion
+    src1 = _simd16_slli_epi32(src1, 8);
+    src2 = _simd16_slli_epi32(src2, 16);
+    src3 = _simd16_slli_epi32(src3, 24);
+
+    simd16scalari final = _simd16_or_si(_simd16_or_si(src0, src1), _simd16_or_si(src2, src3)); // 0 1 2 3 4 5 6 7 8 9 A B C D E F
+
+    // de-swizzle conversion
+#if 1
+    simd16scalari final0 = _simd16_permute2f128_si(final, final, 0xA0); // (2, 2, 0, 0)     // 0 1 2 3 0 1 2 3 8 9 A B 8 9 A B
+    simd16scalari final1 = _simd16_permute2f128_si(final, final, 0xF5); // (3, 3, 1, 1)     // 4 5 6 7 4 5 6 7 C D E F C D E F
+
+    final = _simd16_shuffle_epi64(final0, final1, 0xCC); // (1 1 0 0 1 1 0 0)               // 0 1 4 5 2 3 6 7 8 9 C D A B E F
+
+#else
+    final = _simd16_permute_epi32(final, _simd16_set_epi32(15, 14, 11, 10, 13, 12, 9, 8, 7, 6, 3, 2, 5, 4, 1, 0));
+
+#endif
+#endif
+#if KNOB_ARCH == KNOB_ARCH_AVX
+
+    // splitting into two sets of 4 wide integer vector types
+    // because AVX doesn't have instructions to support this operation at 8 wide
+#if 0
+    __m128i srcLo0 = _mm256_castsi256_si128(src0); // 000r000r000r000r
+    __m128i srcLo1 = _mm256_castsi256_si128(src1); // 000g000g000g000g
+    __m128i srcLo2 = _mm256_castsi256_si128(src2); // 000b000b000b000b
+    __m128i srcLo3 = _mm256_castsi256_si128(src3); // 000a000a000a000a
+
+    __m128i srcHi0 = _mm256_extractf128_si256(src0, 1); // 000r000r000r000r
+    __m128i srcHi1 = _mm256_extractf128_si256(src1, 1); // 000g000g000g000g
+    __m128i srcHi2 = _mm256_extractf128_si256(src2, 1); // 000b000b000b000b
+    __m128i srcHi3 = _mm256_extractf128_si256(src3, 1); // 000a000a000a000a
+
+    srcLo1 = _mm_slli_si128(srcLo1, 1); // 00g000g000g000g0
+    srcHi1 = _mm_slli_si128(srcHi1, 1); // 00g000g000g000g0
+    srcLo2 = _mm_slli_si128(srcLo2, 2); // 0b000b000b000b00
+    srcHi2 = _mm_slli_si128(srcHi2, 2); // 0b000b000b000b00
+    srcLo3 = _mm_slli_si128(srcLo3, 3); // a000a000a000a000
+    srcHi3 = _mm_slli_si128(srcHi3, 3); // a000a000a000a000
+
+    srcLo0 = _mm_or_si128(srcLo0, srcLo1); // 00gr00gr00gr00gr
+    srcLo2 = _mm_or_si128(srcLo2, srcLo3); // ab00ab00ab00ab00
+
+    srcHi0 = _mm_or_si128(srcHi0, srcHi1); // 00gr00gr00gr00gr
+    srcHi2 = _mm_or_si128(srcHi2, srcHi3); // ab00ab00ab00ab00
+
+    srcLo0 = _mm_or_si128(srcLo0, srcLo2); // abgrabgrabgrabgr
+    srcHi0 = _mm_or_si128(srcHi0, srcHi2); // abgrabgrabgrabgr
+
+    // unpack into rows that get the tiling order correct
+    __m128i vRow00 = _mm_unpacklo_epi64(srcLo0, srcHi0); // abgrabgrabgrabgrabgrabgrabgrabgr
+    __m128i vRow10 = _mm_unpackhi_epi64(srcLo0, srcHi0);
+
+    __m256i final = _mm256_castsi128_si256(vRow00);
+    final = _mm256_insertf128_si256(final, vRow10, 1);
+
+#else
+#if 0
+    simd16scalari final = _simd16_setzero_si();
+
+#endif
+#endif
+#elif KNOB_ARCH >= KNOB_ARCH_AVX2
+    // logic is as above, only wider
+#if 0
+    src1 = _simd16_slli_epi32(src1, 8);
+    src2 = _simd16_slli_epi32(src2, 16);
+    src3 = _simd16_slli_epi32(src3, 24);
+
+    simd16scalari final = _simd16_or_si(_simd16_or_si(src0, src1), _simd16_or_si(src2, src3));
+
+    final = _simd16_permute_epi32(final, _simd16_set_epi32(15, 14, 11, 10, 13, 12, 9, 8, 7, 6, 3, 2, 5, 4, 1, 0));
+
+#endif
+#endif
+    _mm256_storeu2_m128i(reinterpret_cast<__m128i *>(pDst1), reinterpret_cast<__m128i *>(pDst0), _simd16_extract_si(final, 0));
+    _mm256_storeu2_m128i(reinterpret_cast<__m128i *>(pDst3), reinterpret_cast<__m128i *>(pDst2), _simd16_extract_si(final, 1));
+}
+
+#endif
 template <SWR_FORMAT DstFormat>
 INLINE static void FlatConvert(const uint8_t* pSrc, uint8_t* pDst, uint8_t* pDst1)
 {
@@ -477,10 +718,16 @@ INLINE static void FlatConvert(const uint8_t* pSrc, uint8_t* pDst, uint8_t* pDst1)
     src2 = _mm256_or_si256(src2, src3);
 
     __m256i final = _mm256_or_si256(src0, src2);
-
+#if 0
+
+    __m256i perm = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
+
+    final = _mm256_permutevar8x32_epi32(final, perm);
+#else
+    // adjust the data to get the tiling order correct 0 1 2 3 -> 0 2 1 3
     final = _mm256_permute4x64_epi64(final, 0xD8);
-
+#endif
 #endif
 
     _mm256_storeu2_m128i((__m128i*)pDst1, (__m128i*)pDst, final);
@@ -618,7 +865,11 @@ struct ConvertPixelsSOAtoAOS < R32G32B32A32_FLOAT, R8G8B8A8_UNORM >
     template <size_t NumDests>
     INLINE static void Convert(const uint8_t* pSrc, uint8_t* (&ppDsts)[NumDests])
     {
+#if USE_8x2_TILE_BACKEND
+        FlatConvert<R8G8B8A8_UNORM>(pSrc, ppDsts[0], ppDsts[1], ppDsts[2], ppDsts[3]);
+#else
         FlatConvert<R8G8B8A8_UNORM>(pSrc, ppDsts[0], ppDsts[1]);
+#endif
     }
 };
 
@@ -638,7 +889,11 @@ struct ConvertPixelsSOAtoAOS < R32G32B32A32_FLOAT, R8G8B8A8_UNORM_SRGB >
     template <size_t NumDests>
     INLINE static void Convert(const uint8_t* pSrc, uint8_t* (&ppDsts)[NumDests])
     {
+#if USE_8x2_TILE_BACKEND
+        FlatConvert<R8G8B8A8_UNORM_SRGB>(pSrc, ppDsts[0], ppDsts[1], ppDsts[2], ppDsts[3]);
+#else
         FlatConvert<R8G8B8A8_UNORM_SRGB>(pSrc, ppDsts[0], ppDsts[1]);
+#endif
     }
 };
 
@@ -668,6 +923,21 @@ struct StoreRasterTile
         uint32_t x, uint32_t y, float outputColor[4])
     {
+#if USE_8x2_TILE_BACKEND
+        typedef SimdTile_16<SrcFormat, DstFormat> SimdT;
+
+        SimdT* pSrcSimdTiles = (SimdT*)pSrc;
+
+        // Compute which simd tile we're accessing within 8x8 tile.
+        //   i.e. Compute linear simd tile coordinate given (x, y) in pixel coordinates.
+        uint32_t simdIndex = (y / SIMD16_TILE_Y_DIM) * (KNOB_TILE_X_DIM / SIMD16_TILE_X_DIM) + (x / SIMD16_TILE_X_DIM);
+
+        SimdT* pSimdTile = &pSrcSimdTiles[simdIndex];
+
+        uint32_t simdOffset = (y % SIMD16_TILE_Y_DIM) * SIMD16_TILE_X_DIM + (x % SIMD16_TILE_X_DIM);
+
+        pSimdTile->GetSwizzledColor(simdOffset, outputColor);
+#else
         typedef SimdTile<SrcFormat, DstFormat> SimdT;
 
         SimdT* pSrcSimdTiles = (SimdT*)pSrc;
@@ -681,6 +951,7 @@ struct StoreRasterTile
         uint32_t simdOffset = (y % SIMD_TILE_Y_DIM) * SIMD_TILE_X_DIM + (x % SIMD_TILE_X_DIM);
 
         pSimdTile->GetSwizzledColor(simdOffset, outputColor);
+#endif
     }
 
 //////////////////////////////////////////////////////////////////////////
@@ -861,6 +1132,32 @@ struct OptStoreRasterTile< TilingTraits<SWR_TILE_NONE, 32>, SrcFormat, DstFormat >
         uint8_t* pDst = (uint8_t*)ComputeSurfaceAddress(x, y, pDstSurface->arrayIndex + renderTargetArrayIndex,
             pDstSurface->arrayIndex + renderTargetArrayIndex, sampleNum, pDstSurface->lod, pDstSurface);
+#if USE_8x2_TILE_BACKEND
+        uint8_t* ppRows[] = { pDst, pDst + pDstSurface->pitch, pDst + (SIMD16_TILE_X_DIM * DST_BYTES_PER_PIXEL) / 2, pDst + pDstSurface->pitch + (SIMD16_TILE_X_DIM * DST_BYTES_PER_PIXEL) / 2 };
+
+        for (uint32_t row = 0; row < KNOB_TILE_Y_DIM / SIMD16_TILE_Y_DIM; ++row)
+        {
+            uint8_t* ppStartRows[] = { ppRows[0], ppRows[1], ppRows[2], ppRows[3] };
+
+            for (uint32_t col = 0; col < KNOB_TILE_X_DIM / SIMD16_TILE_X_DIM; ++col)
+            {
+                // Format conversion and convert from SOA to AOS, and store the rows.
+                ConvertPixelsSOAtoAOS<SrcFormat, DstFormat>::Convert(pSrc, ppRows);
+
+                ppRows[0] += SIMD16_TILE_X_DIM * DST_BYTES_PER_PIXEL;
+                ppRows[1] += SIMD16_TILE_X_DIM * DST_BYTES_PER_PIXEL;
+                ppRows[2] += SIMD16_TILE_X_DIM * DST_BYTES_PER_PIXEL;
+                ppRows[3] += SIMD16_TILE_X_DIM * DST_BYTES_PER_PIXEL;
+
+                pSrc += KNOB_SIMD16_WIDTH * SRC_BYTES_PER_PIXEL;
+            }
+
+            ppRows[0] = ppStartRows[0] + SIMD16_TILE_Y_DIM * pDstSurface->pitch;
+            ppRows[1] = ppStartRows[1] + SIMD16_TILE_Y_DIM * pDstSurface->pitch;
+            ppRows[2] = ppStartRows[2] + SIMD16_TILE_Y_DIM * pDstSurface->pitch;
+            ppRows[3] = ppStartRows[3] + SIMD16_TILE_Y_DIM * pDstSurface->pitch;
+        }
+#else
         uint8_t* ppRows[] = { pDst, pDst + pDstSurface->pitch };
 
         for (uint32_t row = 0; row < KNOB_TILE_Y_DIM / SIMD_TILE_Y_DIM; ++row)
@@ -880,6 +1177,7 @@ struct OptStoreRasterTile< TilingTraits<SWR_TILE_NONE, 32>, SrcFormat, DstFormat >
             ppRows[0] = ppStartRows[0] + 2 * pDstSurface->pitch;
             ppRows[1] = ppStartRows[1] + 2 * pDstSurface->pitch;
         }
+#endif
     }
 };
 
@@ -1212,6 +1510,7 @@ template<SWR_FORMAT SrcFormat, SWR_FORMAT DstFormat>
 struct OptStoreRasterTile< TilingTraits<SWR_TILE_MODE_YMAJOR, 32>, SrcFormat, DstFormat>
 {
     typedef StoreRasterTile<TilingTraits<SWR_TILE_MODE_YMAJOR, 32>, SrcFormat, DstFormat> GenericStoreTile;
+    static const size_t SRC_BYTES_PER_PIXEL = FormatTraits<SrcFormat>::bpp / 8;
 
 //////////////////////////////////////////////////////////////////////////
 /// @brief Stores an 8x8 raster tile to the destination surface.
 /// @param pSrc - Pointer to raster tile.
 /// @param pDstSurface - Destination surface state
 /// @param x, y - Coordinates to raster tile.
@@ -1241,6 +1540,19 @@ struct OptStoreRasterTile< TilingTraits<SWR_TILE_MODE_YMAJOR, 32>, SrcFormat, DstFormat>
         uint8_t* pCol0 = (uint8_t*)ComputeSurfaceAddress(x, y, pDstSurface->arrayIndex + renderTargetArrayIndex,
             pDstSurface->arrayIndex + renderTargetArrayIndex, sampleNum, pDstSurface->lod, pDstSurface);
+#if USE_8x2_TILE_BACKEND
+        // The Hot Tile uses a row-major tiling mode and has a larger memory footprint.  So we iterate in a row-major pattern.
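+        // Each loop iteration below covers one 8x2 simd16 block: ppDsts[0]/[1]
+        // address rows 0/1 of the left 4-pixel Y-major column, and ppDsts[2]/[3]
+        // rows 0/1 of the adjacent column (DestColumnBytes away), matching the
+        // four-row split performed by StorePixels<32, 4>.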
+        for (uint32_t row = 0; row < KNOB_TILE_Y_DIM; row += SIMD16_TILE_Y_DIM)
+        {
+            uint8_t *pRow = pCol0 + row * DestRowWidthBytes;
+
+            uint8_t *ppDsts[] = { pRow, pRow + DestRowWidthBytes, pRow + DestColumnBytes, pRow + DestColumnBytes + DestRowWidthBytes };
+
+            ConvertPixelsSOAtoAOS<SrcFormat, DstFormat>::Convert(pSrc, ppDsts);
+
+            pSrc += KNOB_SIMD16_WIDTH * SRC_BYTES_PER_PIXEL;
+        }
+#else
         // Increment by a whole SIMD. 4x2 for AVX. 2x2 for SSE.
         uint32_t pSrcInc = (FormatTraits<SrcFormat>::bpp * KNOB_SIMD_WIDTH) / 8;
 
         for (uint32_t row = 0; row < KNOB_TILE_Y_DIM; row += SIMD_TILE_Y_DIM)
@@ -1261,6 +1573,7 @@ struct OptStoreRasterTile< TilingTraits<SWR_TILE_MODE_YMAJOR, 32>, SrcFormat, DstFormat>
             ConvertPixelsSOAtoAOS<SrcFormat, DstFormat>::Convert(pSrc, ppDsts);
             pSrc += pSrcInc;
         }
+#endif
     }
 };
 
diff --git a/src/gallium/drivers/swr/rasterizer/memory/TilingFunctions.h b/src/gallium/drivers/swr/rasterizer/memory/TilingFunctions.h
index 9b412f8b344..0694a99808f 100644
--- a/src/gallium/drivers/swr/rasterizer/memory/TilingFunctions.h
+++ b/src/gallium/drivers/swr/rasterizer/memory/TilingFunctions.h
@@ -153,6 +153,107 @@ struct SimdTile
     }
 };
 
+#if ENABLE_AVX512_SIMD16
+//////////////////////////////////////////////////////////////////////////
+/// SimdTile 8x2 for AVX-512
+//////////////////////////////////////////////////////////////////////////
+
+template<SWR_FORMAT HotTileFormat, SWR_FORMAT SrcOrDstFormat>
+struct SimdTile_16
+{
+    // SimdTile_16 is SOA (e.g. rrrrrrrrrrrrrrrr gggggggggggggggg bbbbbbbbbbbbbbbb aaaaaaaaaaaaaaaa )
+    float color[FormatTraits<HotTileFormat>::numComps][KNOB_SIMD16_WIDTH];
+
+    //////////////////////////////////////////////////////////////////////////
+    /// @brief Retrieve color from simd.
+    /// @param index - linear index to color within simd.
+    /// @param outputColor - output color
+    INLINE void GetSwizzledColor(
+        uint32_t index,
+        float outputColor[4])
+    {
+        // SOA pattern for 8x2..
+        //   0 1 4 5 8 9 C D
+        //   2 3 6 7 A B E F
+        // The offset converts pattern to linear
+        static const uint32_t offset[KNOB_SIMD16_WIDTH] = { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 };
+
+        for (uint32_t i = 0; i < FormatTraits<SrcOrDstFormat>::numComps; ++i)
+        {
+            outputColor[i] = this->color[FormatTraits<SrcOrDstFormat>::swizzle(i)][offset[index]];
+        }
+    }
+
+    //////////////////////////////////////////////////////////////////////////
+    /// @brief Set color within simd.
+    /// @param index - linear index to color within simd.
+    /// @param src - input color
+    INLINE void SetSwizzledColor(
+        uint32_t index,
+        const float src[4])
+    {
+        // SOA pattern for 8x2..
+        //   0 1 4 5 8 9 C D
+        //   2 3 6 7 A B E F
+        // The offset converts pattern to linear
+        static const uint32_t offset[KNOB_SIMD16_WIDTH] = { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 };
+
+        for (uint32_t i = 0; i < FormatTraits<SrcOrDstFormat>::numComps; ++i)
+        {
+            this->color[i][offset[index]] = src[i];
+        }
+    }
+};
+
+template<>
+struct SimdTile_16 <R8_UINT, R8_UINT>
+{
+    // SimdTile_16 is SOA (e.g. rrrrrrrrrrrrrrrr gggggggggggggggg bbbbbbbbbbbbbbbb aaaaaaaaaaaaaaaa )
+    uint8_t color[FormatTraits<R8_UINT>::numComps][KNOB_SIMD16_WIDTH];
+
+    //////////////////////////////////////////////////////////////////////////
+    /// @brief Retrieve color from simd.
+    /// @param index - linear index to color within simd.
+    /// @param outputColor - output color
+    INLINE void GetSwizzledColor(
+        uint32_t index,
+        float outputColor[4])
+    {
+        // SOA pattern for 8x2..
+        //   0 1 4 5 8 9 C D
+        //   2 3 6 7 A B E F
+        // The offset converts pattern to linear
+        static const uint32_t offset[KNOB_SIMD16_WIDTH] = { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 };
+
+        for (uint32_t i = 0; i < FormatTraits<R8_UINT>::numComps; ++i)
+        {
+            uint32_t src = this->color[FormatTraits<R8_UINT>::swizzle(i)][offset[index]];
+            outputColor[i] = *(float*)&src;
+        }
+    }
+
+    //////////////////////////////////////////////////////////////////////////
+    /// @brief Set color within simd.
+    /// @param index - linear index to color within simd.
+    /// @param src - input color
+    INLINE void SetSwizzledColor(
+        uint32_t index,
+        const float src[4])
+    {
+        // SOA pattern for 8x2..
+        //   0 1 4 5 8 9 C D
+        //   2 3 6 7 A B E F
+        // The offset converts pattern to linear
+        static const uint32_t offset[KNOB_SIMD16_WIDTH] = { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 };
+
+        for (uint32_t i = 0; i < FormatTraits<R8_UINT>::numComps; ++i)
+        {
+            this->color[i][offset[index]] = *(uint8_t*)&src[i];
+        }
+    }
+};
+
+#endif
 //////////////////////////////////////////////////////////////////////////
 /// @brief Computes lod offset for 1D surface at specified lod.
 /// @param baseWidth - width of basemip (mip 0).