#define OSALIGNLINE(RWORD) OSALIGN(RWORD, 64)
#define OSALIGNSIMD(RWORD) OSALIGN(RWORD, KNOB_SIMD_BYTES)
-#if ENABLE_AVX512_SIMD16
#define OSALIGNSIMD16(RWORD) OSALIGN(RWORD, KNOB_SIMD16_BYTES)
-#endif
#include "common/swr_assert.h"
#ifndef __SWR_SIMD16INTRIN_H__
#define __SWR_SIMD16INTRIN_H__
-#if ENABLE_AVX512_SIMD16
-
#if KNOB_SIMD16_WIDTH == 16
typedef SIMD512 SIMD16;
#else
#define _simd16_mask2int(mask) int(mask)
#define _simd16_vmask_ps SIMD16::vmask_ps
-#endif // ENABLE_AVX512_SIMD16
-
-#endif //__SWR_SIMD16INTRIN_H__
return _simd_castsi_ps(_simd_and_si(ai, _simd_set1_epi32(0x7fffffff)));
}
-#if ENABLE_AVX512_SIMD16
#include "simd16intrin.h"
-#endif // ENABLE_AVX512_SIMD16
#endif //__SWR_SIMDINTRIN_H__
simdscalari const& viewportIdx,
simdscalari const& rtIdx);
-#if ENABLE_AVX512_SIMD16
// function signature for pipeline stages that execute after primitive assembly
typedef void(SIMDCALL* PFN_PROCESS_PRIMS_SIMD16)(DRAW_CONTEXT* pDC,
PA_STATE& pa,
simd16scalari const& viewportIdx,
simd16scalari const& rtIdx);
-#endif
OSALIGNLINE(struct) API_STATE
{
// Vertex Buffers
/// SOA RGBA32_FLOAT format.
/// @param pSrc - source data in SOA form
/// @param dst - output data in SOA form
-template <SWR_FORMAT SrcFormat>
-INLINE void LoadSOA(const uint8_t* pSrc, simdvector& dst)
+template <typename SIMD_T, SWR_FORMAT SrcFormat>
+INLINE void SIMDCALL LoadSOA(const uint8_t* pSrc, Vec4<SIMD_T>& dst)
{
// fast path for float32
if ((FormatTraits<SrcFormat>::GetType(0) == SWR_TYPE_FLOAT) &&
(FormatTraits<SrcFormat>::GetBPC(0) == 32))
{
- auto lambda = [&](int comp) {
- simdscalar vComp = _simd_load_ps((const float*)(pSrc + comp * sizeof(simdscalar)));
+ auto lambda = [&](int comp)
+ {
+ Float<SIMD_T> vComp =
+ SIMD_T::load_ps(reinterpret_cast<const float*>(pSrc + comp * sizeof(Float<SIMD_T>)));
dst.v[FormatTraits<SrcFormat>::swizzle(comp)] = vComp;
};
return;
}
- auto lambda = [&](int comp) {
+ auto lambda = [&](int comp)
+ {
// load SIMD components
- simdscalar vComp = FormatTraits<SrcFormat>::loadSOA(comp, pSrc);
+ Float<SIMD_T> vComp;
+ FormatTraits<SrcFormat>::loadSOA(comp, pSrc, vComp);
// unpack
vComp = FormatTraits<SrcFormat>::unpack(comp, vComp);
// convert
if (FormatTraits<SrcFormat>::isNormalized(comp))
{
- vComp = _simd_cvtepi32_ps(_simd_castps_si(vComp));
- vComp = _simd_mul_ps(vComp, _simd_set1_ps(FormatTraits<SrcFormat>::toFloat(comp)));
+ vComp = SIMD_T::cvtepi32_ps(SIMD_T::castps_si(vComp));
+ vComp = SIMD_T::mul_ps(vComp, SIMD_T::set1_ps(FormatTraits<SrcFormat>::toFloat(comp)));
}
dst.v[FormatTraits<SrcFormat>::swizzle(comp)] = vComp;
- pSrc += (FormatTraits<SrcFormat>::GetBPC(comp) * KNOB_SIMD_WIDTH) / 8;
+ // is there a better way to get this from the SIMD traits?
+ const uint32_t SIMD_WIDTH = sizeof(typename SIMD_T::Float) / sizeof(float);
+
+ pSrc += (FormatTraits<SrcFormat>::GetBPC(comp) * SIMD_WIDTH) / 8;
};
UnrollerL<0, FormatTraits<SrcFormat>::numComps, 1>::step(lambda);
}
+template <SWR_FORMAT SrcFormat>
+INLINE void SIMDCALL LoadSOA(const uint8_t* pSrc, simdvector& dst)
+{
+ LoadSOA<SIMD256, SrcFormat>(pSrc, dst);
+}
+
+template <SWR_FORMAT SrcFormat>
+INLINE void SIMDCALL LoadSOA(const uint8_t* pSrc, simd16vector& dst)
+{
+ LoadSOA<SIMD512, SrcFormat>(pSrc, dst);
+}
+
//////////////////////////////////////////////////////////////////////////
/// @brief Clamps the given component based on the requirements on the
/// Format template arg
/// @param vComp - SIMD vector of floats
/// @param Component - component
-template <SWR_FORMAT Format>
-INLINE simdscalar Clamp(simdscalar const& vC, uint32_t Component)
+template <typename SIMD_T, SWR_FORMAT Format>
+INLINE Float<SIMD_T> SIMDCALL Clamp(Float<SIMD_T> const& v, uint32_t Component)
{
- simdscalar vComp = vC;
+ Float<SIMD_T> vComp = v;
if (FormatTraits<Format>::isNormalized(Component))
{
if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_UNORM)
{
- vComp = _simd_max_ps(vComp, _simd_setzero_ps());
+ vComp = SIMD_T::max_ps(vComp, SIMD_T::setzero_ps());
}
if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_SNORM)
{
- vComp = _simd_max_ps(vComp, _simd_set1_ps(-1.0f));
+ vComp = SIMD_T::max_ps(vComp, SIMD_T::set1_ps(-1.0f));
}
- vComp = _simd_min_ps(vComp, _simd_set1_ps(1.0f));
+ vComp = SIMD_T::min_ps(vComp, SIMD_T::set1_ps(1.0f));
}
else if (FormatTraits<Format>::GetBPC(Component) < 32)
{
if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_UINT)
{
- int iMax = (1 << FormatTraits<Format>::GetBPC(Component)) - 1;
- int iMin = 0;
- simdscalari vCompi = _simd_castps_si(vComp);
- vCompi = _simd_max_epu32(vCompi, _simd_set1_epi32(iMin));
- vCompi = _simd_min_epu32(vCompi, _simd_set1_epi32(iMax));
- vComp = _simd_castsi_ps(vCompi);
+ int iMax = (1 << FormatTraits<Format>::GetBPC(Component)) - 1;
+ int iMin = 0;
+ Integer<SIMD_T> vCompi = SIMD_T::castps_si(vComp);
+ vCompi = SIMD_T::max_epu32(vCompi, SIMD_T::set1_epi32(iMin));
+ vCompi = SIMD_T::min_epu32(vCompi, SIMD_T::set1_epi32(iMax));
+ vComp = SIMD_T::castsi_ps(vCompi);
}
else if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_SINT)
{
- int iMax = (1 << (FormatTraits<Format>::GetBPC(Component) - 1)) - 1;
- int iMin = -1 - iMax;
- simdscalari vCompi = _simd_castps_si(vComp);
- vCompi = _simd_max_epi32(vCompi, _simd_set1_epi32(iMin));
- vCompi = _simd_min_epi32(vCompi, _simd_set1_epi32(iMax));
- vComp = _simd_castsi_ps(vCompi);
+ int iMax = (1 << (FormatTraits<Format>::GetBPC(Component) - 1)) - 1;
+ int iMin = -1 - iMax;
+ Integer<SIMD_T> vCompi = SIMD_T::castps_si(vComp);
+ vCompi = SIMD_T::max_epi32(vCompi, SIMD_T::set1_epi32(iMin));
+ vCompi = SIMD_T::min_epi32(vCompi, SIMD_T::set1_epi32(iMax));
+ vComp = SIMD_T::castsi_ps(vCompi);
}
}
return vComp;
}
-//////////////////////////////////////////////////////////////////////////
-/// @brief Normalize the given component based on the requirements on the
-/// Format template arg
-/// @param vComp - SIMD vector of floats
-/// @param Component - component
template <SWR_FORMAT Format>
-INLINE simdscalar Normalize(simdscalar const& vC, uint32_t Component)
+INLINE simdscalar SIMDCALL Clamp(simdscalar const& v, uint32_t Component)
{
- simdscalar vComp = vC;
- if (FormatTraits<Format>::isNormalized(Component))
- {
- vComp = _simd_mul_ps(vComp, _simd_set1_ps(FormatTraits<Format>::fromFloat(Component)));
- vComp = _simd_castsi_ps(_simd_cvtps_epi32(vComp));
- }
- return vComp;
-}
-
-//////////////////////////////////////////////////////////////////////////
-/// @brief Convert and store simdvector of pixels in SOA
-/// RGBA32_FLOAT to SOA format
-/// @param src - source data in SOA form
-/// @param dst - output data in SOA form
-template <SWR_FORMAT DstFormat>
-INLINE void StoreSOA(const simdvector& src, uint8_t* pDst)
-{
- // fast path for float32
- if ((FormatTraits<DstFormat>::GetType(0) == SWR_TYPE_FLOAT) &&
- (FormatTraits<DstFormat>::GetBPC(0) == 32))
- {
- for (uint32_t comp = 0; comp < FormatTraits<DstFormat>::numComps; ++comp)
- {
- simdscalar vComp = src.v[FormatTraits<DstFormat>::swizzle(comp)];
-
- // Gamma-correct
- if (FormatTraits<DstFormat>::isSRGB)
- {
- if (comp < 3) // Input format is always RGBA32_FLOAT.
- {
- vComp = FormatTraits<R32G32B32A32_FLOAT>::convertSrgb(comp, vComp);
- }
- }
-
- _simd_store_ps((float*)(pDst + comp * sizeof(simdscalar)), vComp);
- }
- return;
- }
-
- auto lambda = [&](int comp) {
- simdscalar vComp = src.v[FormatTraits<DstFormat>::swizzle(comp)];
-
- // Gamma-correct
- if (FormatTraits<DstFormat>::isSRGB)
- {
- if (comp < 3) // Input format is always RGBA32_FLOAT.
- {
- vComp = FormatTraits<R32G32B32A32_FLOAT>::convertSrgb(comp, vComp);
- }
- }
-
- // clamp
- vComp = Clamp<DstFormat>(vComp, comp);
-
- // normalize
- vComp = Normalize<DstFormat>(vComp, comp);
-
- // pack
- vComp = FormatTraits<DstFormat>::pack(comp, vComp);
-
- // store
- FormatTraits<DstFormat>::storeSOA(comp, pDst, vComp);
-
- pDst += (FormatTraits<DstFormat>::GetBPC(comp) * KNOB_SIMD_WIDTH) / 8;
- };
-
- UnrollerL<0, FormatTraits<DstFormat>::numComps, 1>::step(lambda);
+ return Clamp<SIMD256, Format>(v, Component);
}
-#if ENABLE_AVX512_SIMD16
-//////////////////////////////////////////////////////////////////////////
-/// @brief Load SIMD packed pixels in SOA format and converts to
-/// SOA RGBA32_FLOAT format.
-/// @param pSrc - source data in SOA form
-/// @param dst - output data in SOA form
-template <SWR_FORMAT SrcFormat>
-INLINE void SIMDCALL LoadSOA(const uint8_t* pSrc, simd16vector& dst)
+template <SWR_FORMAT Format>
+INLINE simd16scalar SIMDCALL Clamp(simd16scalar const& v, uint32_t Component)
{
- // fast path for float32
- if ((FormatTraits<SrcFormat>::GetType(0) == SWR_TYPE_FLOAT) &&
- (FormatTraits<SrcFormat>::GetBPC(0) == 32))
- {
- auto lambda = [&](int comp) {
- simd16scalar vComp =
- _simd16_load_ps(reinterpret_cast<const float*>(pSrc + comp * sizeof(simd16scalar)));
-
- dst.v[FormatTraits<SrcFormat>::swizzle(comp)] = vComp;
- };
-
- UnrollerL<0, FormatTraits<SrcFormat>::numComps, 1>::step(lambda);
- return;
- }
-
- auto lambda = [&](int comp) {
- // load SIMD components
- simd16scalar vComp = FormatTraits<SrcFormat>::loadSOA_16(comp, pSrc);
-
- // unpack
- vComp = FormatTraits<SrcFormat>::unpack(comp, vComp);
-
- // convert
- if (FormatTraits<SrcFormat>::isNormalized(comp))
- {
- vComp = _simd16_cvtepi32_ps(_simd16_castps_si(vComp));
- vComp = _simd16_mul_ps(vComp, _simd16_set1_ps(FormatTraits<SrcFormat>::toFloat(comp)));
- }
-
- dst.v[FormatTraits<SrcFormat>::swizzle(comp)] = vComp;
-
- pSrc += (FormatTraits<SrcFormat>::GetBPC(comp) * KNOB_SIMD16_WIDTH) / 8;
- };
-
- UnrollerL<0, FormatTraits<SrcFormat>::numComps, 1>::step(lambda);
+ return Clamp<SIMD512, Format>(v, Component);
}
//////////////////////////////////////////////////////////////////////////
-/// @brief Clamps the given component based on the requirements on the
+/// @brief Normalize the given component based on the requirements on the
/// Format template arg
/// @param vComp - SIMD vector of floats
/// @param Component - component
-template <SWR_FORMAT Format>
-INLINE simd16scalar SIMDCALL Clamp(simd16scalar const& v, uint32_t Component)
+template <typename SIMD_T, SWR_FORMAT Format>
+INLINE Float<SIMD_T> SIMDCALL Normalize(Float<SIMD_T> const& vComp, uint32_t Component)
{
- simd16scalar vComp = v;
+ Float<SIMD_T> r = vComp;
if (FormatTraits<Format>::isNormalized(Component))
{
- if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_UNORM)
- {
- vComp = _simd16_max_ps(vComp, _simd16_setzero_ps());
- }
-
- if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_SNORM)
- {
- vComp = _simd16_max_ps(vComp, _simd16_set1_ps(-1.0f));
- }
- vComp = _simd16_min_ps(vComp, _simd16_set1_ps(1.0f));
- }
- else if (FormatTraits<Format>::GetBPC(Component) < 32)
- {
- if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_UINT)
- {
- int iMax = (1 << FormatTraits<Format>::GetBPC(Component)) - 1;
- int iMin = 0;
- simd16scalari vCompi = _simd16_castps_si(vComp);
- vCompi = _simd16_max_epu32(vCompi, _simd16_set1_epi32(iMin));
- vCompi = _simd16_min_epu32(vCompi, _simd16_set1_epi32(iMax));
- vComp = _simd16_castsi_ps(vCompi);
- }
- else if (FormatTraits<Format>::GetType(Component) == SWR_TYPE_SINT)
- {
- int iMax = (1 << (FormatTraits<Format>::GetBPC(Component) - 1)) - 1;
- int iMin = -1 - iMax;
- simd16scalari vCompi = _simd16_castps_si(vComp);
- vCompi = _simd16_max_epi32(vCompi, _simd16_set1_epi32(iMin));
- vCompi = _simd16_min_epi32(vCompi, _simd16_set1_epi32(iMax));
- vComp = _simd16_castsi_ps(vCompi);
- }
+ r = SIMD_T::mul_ps(r, SIMD_T::set1_ps(FormatTraits<Format>::fromFloat(Component)));
+ r = SIMD_T::castsi_ps(SIMD_T::cvtps_epi32(r));
}
+ return r;
+}
- return vComp;
+template <SWR_FORMAT Format>
+INLINE simdscalar SIMDCALL Normalize(simdscalar const& vComp, uint32_t Component)
+{
+ return Normalize<SIMD256, Format>(vComp, Component);
}
-//////////////////////////////////////////////////////////////////////////
-/// @brief Normalize the given component based on the requirements on the
-/// Format template arg
-/// @param vComp - SIMD vector of floats
-/// @param Component - component
template <SWR_FORMAT Format>
INLINE simd16scalar SIMDCALL Normalize(simd16scalar const& vComp, uint32_t Component)
{
- simd16scalar r = vComp;
- if (FormatTraits<Format>::isNormalized(Component))
- {
- r = _simd16_mul_ps(r, _simd16_set1_ps(FormatTraits<Format>::fromFloat(Component)));
- r = _simd16_castsi_ps(_simd16_cvtps_epi32(r));
- }
- return r;
+ return Normalize<SIMD512, Format>(vComp, Component);
}
//////////////////////////////////////////////////////////////////////////
/// RGBA32_FLOAT to SOA format
/// @param src - source data in SOA form
/// @param dst - output data in SOA form
-template <SWR_FORMAT DstFormat>
-INLINE void SIMDCALL StoreSOA(const simd16vector& src, uint8_t* pDst)
+template <typename SIMD_T, SWR_FORMAT DstFormat>
+INLINE void SIMDCALL StoreSOA(const Vec4<SIMD_T>& src, uint8_t* pDst)
{
// fast path for float32
if ((FormatTraits<DstFormat>::GetType(0) == SWR_TYPE_FLOAT) &&
{
for (uint32_t comp = 0; comp < FormatTraits<DstFormat>::numComps; ++comp)
{
- simd16scalar vComp = src.v[FormatTraits<DstFormat>::swizzle(comp)];
+ Float<SIMD_T> vComp = src.v[FormatTraits<DstFormat>::swizzle(comp)];
// Gamma-correct
if (FormatTraits<DstFormat>::isSRGB)
}
}
- _simd16_store_ps(reinterpret_cast<float*>(pDst + comp * sizeof(simd16scalar)), vComp);
+ SIMD_T::store_ps(reinterpret_cast<float*>(pDst + comp * sizeof(simd16scalar)), vComp);
}
return;
}
auto lambda = [&](int comp) {
- simd16scalar vComp = src.v[FormatTraits<DstFormat>::swizzle(comp)];
+ Float<SIMD_T> vComp = src.v[FormatTraits<DstFormat>::swizzle(comp)];
// Gamma-correct
if (FormatTraits<DstFormat>::isSRGB)
}
// clamp
- vComp = Clamp<DstFormat>(vComp, comp);
+ vComp = Clamp<SIMD_T, DstFormat>(vComp, comp);
// normalize
- vComp = Normalize<DstFormat>(vComp, comp);
+ vComp = Normalize<SIMD_T, DstFormat>(vComp, comp);
// pack
vComp = FormatTraits<DstFormat>::pack(comp, vComp);
// store
FormatTraits<DstFormat>::storeSOA(comp, pDst, vComp);
- pDst += (FormatTraits<DstFormat>::GetBPC(comp) * KNOB_SIMD16_WIDTH) / 8;
+ // is there a better way to get this from the SIMD traits?
+ const uint32_t SIMD_WIDTH = sizeof(typename SIMD_T::Float) / sizeof(float);
+
+ pDst += (FormatTraits<DstFormat>::GetBPC(comp) * SIMD_WIDTH) / 8;
};
UnrollerL<0, FormatTraits<DstFormat>::numComps, 1>::step(lambda);
}
-#endif
+template <SWR_FORMAT DstFormat>
+INLINE void SIMDCALL StoreSOA(const simdvector& src, uint8_t* pDst)
+{
+ StoreSOA<SIMD256, DstFormat>(src, pDst);
+}
+
+template <SWR_FORMAT DstFormat>
+INLINE void SIMDCALL StoreSOA(const simd16vector& src, uint8_t* pDst)
+{
+ StoreSOA<SIMD512, DstFormat>(src, pDst);
+}
+
/// @brief Pack/unpack traits, parameterized on component bit width.
///        The unspecialized primary template deletes every operation, so
///        using an unsupported width fails at compile time.
template <uint32_t NumBits, bool Signed = false>
struct PackTraits
{
    static const uint32_t MyNumBits = NumBits;

    // 8-wide (simdscalar) operations
    static simdscalar loadSOA(const uint8_t* pSrc) = delete;
    static void storeSOA(uint8_t* pDst, simdscalar const& src) = delete;
    static simdscalar unpack(simdscalar& in) = delete;
    static simdscalar pack(simdscalar& in) = delete;

    // 16-wide (simd16scalar) operations
    static simd16scalar loadSOA_16(const uint8_t* pSrc) = delete;
    static void SIMDCALL storeSOA(uint8_t* pDst, simd16scalar const& src) = delete;
    static simd16scalar unpack(simd16scalar& in) = delete;
    static simd16scalar pack(simd16scalar& in) = delete;
};
//////////////////////////////////////////////////////////////////////////
static void storeSOA(uint8_t* pDst, simdscalar const& src) { return; }
static simdscalar unpack(simdscalar& in) { return _simd_setzero_ps(); }
static simdscalar pack(simdscalar& in) { return _simd_setzero_ps(); }
-#if ENABLE_AVX512_SIMD16
- static simd16scalar loadSOA_16(const uint8_t* pSrc) { return _simd16_setzero_ps(); }
+
+ static simd16scalar loadSOA_16(const uint8_t* pSrc) { return _simd16_setzero_ps(); }
static void SIMDCALL storeSOA(uint8_t* pDst, simd16scalar const& src) { return; }
static simd16scalar unpack(simd16scalar& in) { return _simd16_setzero_ps(); }
static simd16scalar pack(simd16scalar& in) { return _simd16_setzero_ps(); }
-#endif
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
static simd16scalar loadSOA_16(const uint8_t* pSrc)
{
static simd16scalar pack(simd16scalar& in)
{
+ // clang-format off
+
simd16scalari result = _simd16_setzero_si();
- simdscalari inlo =
- _simd_castps_si(_simd16_extract_ps(in, 0)); // r0 r1 r2 r3 r4 r5 r6 r7 (32b)
- simdscalari inhi = _simd_castps_si(_simd16_extract_ps(in, 1)); // r8 r9 rA rB rC rD rE rF
+ simdscalari inlo = _simd_castps_si(_simd16_extract_ps(in, 0)); // r0 r1 r2 r3 r4 r5 r6 r7 (32b)
+ simdscalari inhi = _simd_castps_si(_simd16_extract_ps(in, 1)); // r8 r9 rA rB rC rD rE rF
- simdscalari permlo =
- _simd_permute2f128_si(inlo, inhi, 0x20); // r0 r1 r2 r3 r8 r9 rA rB (32b)
- simdscalari permhi =
- _simd_permute2f128_si(inlo, inhi, 0x31); // r4 r5 r6 r7 rC rD rE rF (32b)
+ simdscalari permlo = _simd_permute2f128_si(inlo, inhi, 0x20); // r0 r1 r2 r3 r8 r9 rA rB (32b)
+ simdscalari permhi = _simd_permute2f128_si(inlo, inhi, 0x31); // r4 r5 r6 r7 rC rD rE rF (32b)
- simdscalari pack = _simd_packus_epi32(
- permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF (16b)
+ simdscalari pack = _simd_packus_epi32(permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF (16b)
const simdscalari zero = _simd_setzero_si();
- permlo = _simd_permute2f128_si(
- pack,
- zero,
- 0x20); // (2, 0) // r0 r1 r2 r3 r4 r5 r6 r7 00 00 00 00 00 00 00 00 (16b)
- permhi = _simd_permute2f128_si(
- pack,
- zero,
- 0x31); // (3, 1) // r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00 00 (16b)
+ permlo = _simd_permute2f128_si(pack, zero, 0x20); // (2, 0) // r0 r1 r2 r3 r4 r5 r6 r7 00 00 00 00 00 00 00 00 (16b)
+ permhi = _simd_permute2f128_si(pack, zero, 0x31); // (3, 1) // r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00 00 (16b)
- pack = _simd_packus_epi16(permlo,
- permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF 00 00
- // 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (8b)
+ pack = _simd_packus_epi16(permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (8b)
result = _simd16_insert_si(result, pack, 0);
return _simd16_castsi_ps(result);
+
+ // clang-format on
}
-#endif
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
static simd16scalar loadSOA_16(const uint8_t* pSrc)
{
static simd16scalar pack(simd16scalar& in)
{
+ // clang-format off
+
simd16scalari result = _simd16_setzero_si();
- simdscalari inlo =
- _simd_castps_si(_simd16_extract_ps(in, 0)); // r0 r1 r2 r3 r4 r5 r6 r7 (32b)
- simdscalari inhi = _simd_castps_si(_simd16_extract_ps(in, 1)); // r8 r9 rA rB rC rD rE rF
+ simdscalari inlo = _simd_castps_si(_simd16_extract_ps(in, 0)); // r0 r1 r2 r3 r4 r5 r6 r7 (32b)
+ simdscalari inhi = _simd_castps_si(_simd16_extract_ps(in, 1)); // r8 r9 rA rB rC rD rE rF
- simdscalari permlo =
- _simd_permute2f128_si(inlo, inhi, 0x20); // r0 r1 r2 r3 r8 r9 rA rB (32b)
- simdscalari permhi =
- _simd_permute2f128_si(inlo, inhi, 0x31); // r4 r5 r6 r7 rC rD rE rF (32b)
+ simdscalari permlo = _simd_permute2f128_si(inlo, inhi, 0x20); // r0 r1 r2 r3 r8 r9 rA rB (32b)
+ simdscalari permhi = _simd_permute2f128_si(inlo, inhi, 0x31); // r4 r5 r6 r7 rC rD rE rF (32b)
- simdscalari pack = _simd_packs_epi32(
- permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF (16b)
+ simdscalari pack = _simd_packs_epi32(permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF (16b)
const simdscalari zero = _simd_setzero_si();
- permlo = _simd_permute2f128_si(
- pack,
- zero,
- 0x20); // (2, 0) // r0 r1 r2 r3 r4 r5 r6 r7 00 00 00 00 00 00 00 00 (16b)
- permhi = _simd_permute2f128_si(
- pack,
- zero,
- 0x31); // (3, 1) // r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00 00 (16b)
+ permlo = _simd_permute2f128_si(pack, zero, 0x20); // (2, 0) // r0 r1 r2 r3 r4 r5 r6 r7 00 00 00 00 00 00 00 00 (16b)
+ permhi = _simd_permute2f128_si(pack, zero, 0x31); // (3, 1) // r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00 00 (16b)
- pack =
- _simd_packs_epi16(permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF 00
- // 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (8b)
+ pack = _simd_packs_epi16(permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (8b)
result = _simd16_insert_si(result, pack, 0);
return _simd16_castsi_ps(result);
+
+ // clang-format on
}
-#endif
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
static simd16scalar loadSOA_16(const uint8_t* pSrc)
{
static simd16scalar pack(simd16scalar& in)
{
+ // clang-format off
+
const simd16scalari zero = _simd16_setzero_si();
- simd16scalari permlo = _simd16_permute2f128_si(
- _simd16_castps_si(in),
- zero,
- 0x08); // (0, 0, 2, 0) // r0 r1 r2 r3 r8 r9 rA rB 00 00 00 00 00 00 00 00 (32b)
- simd16scalari permhi = _simd16_permute2f128_si(
- _simd16_castps_si(in),
- zero,
- 0x0D); // (0, 0, 3, 1) // r4 r5 r6 r7 rC rD rE rF 00 00 00 00 00 00 00 00
+ simd16scalari permlo = _simd16_permute2f128_si(_simd16_castps_si(in), zero, 0x08); // (0, 0, 2, 0) // r0 r1 r2 r3 r8 r9 rA rB 00 00 00 00 00 00 00 00 (32b)
+ simd16scalari permhi = _simd16_permute2f128_si(_simd16_castps_si(in), zero, 0x0D); // (0, 0, 3, 1) // r4 r5 r6 r7 rC rD rE rF 00 00 00 00 00 00 00 00
- simd16scalari result = _simd16_packus_epi32(
- permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00
- // 00 00 00 00 00 00 00 00 00 (16b)
+ simd16scalari result = _simd16_packus_epi32(permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (16b)
return _simd16_castsi_ps(result);
+
+ // clang-format on
}
-#endif
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
static simd16scalar loadSOA_16(const uint8_t* pSrc)
{
static simd16scalar pack(simd16scalar& in)
{
+ // clang-format off
+
const simd16scalari zero = _simd16_setzero_si();
- simd16scalari permlo = _simd16_permute2f128_si(
- _simd16_castps_si(in),
- zero,
- 0x08); // (0, 0, 2, 0) // r0 r1 r2 r3 r8 r9 rA rB 00 00 00 00 00 00 00 00 (32b)
- simd16scalari permhi = _simd16_permute2f128_si(
- _simd16_castps_si(in),
- zero,
- 0x0D); // (0, 0, 3, 1) // r4 r5 r6 r7 rC rD rE rF 00 00 00 00 00 00 00 00
+ simd16scalari permlo = _simd16_permute2f128_si(_simd16_castps_si(in), zero, 0x08); // (0, 0, 2, 0) // r0 r1 r2 r3 r8 r9 rA rB 00 00 00 00 00 00 00 00 (32b)
+ simd16scalari permhi = _simd16_permute2f128_si(_simd16_castps_si(in), zero, 0x0D); // (0, 0, 3, 1) // r4 r5 r6 r7 rC rD rE rF 00 00 00 00 00 00 00 00
- simd16scalari result = _simd16_packs_epi32(
- permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00
- // 00 00 00 00 00 00 00 00 00 (16b)
+ simd16scalari result = _simd16_packs_epi32(permlo, permhi); // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 rA rB rC rD rE rF 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (16b)
return _simd16_castsi_ps(result);
+
+ // clang-format on
}
-#endif
};
//////////////////////////////////////////////////////////////////////////
}
static simdscalar unpack(simdscalar& in) { return in; }
static simdscalar pack(simdscalar& in) { return in; }
-#if ENABLE_AVX512_SIMD16
static simd16scalar loadSOA_16(const uint8_t* pSrc)
{
static simd16scalar unpack(simd16scalar& in) { return in; }
static simd16scalar pack(simd16scalar& in) { return in; }
-#endif
};
//////////////////////////////////////////////////////////////////////////
return Result;
}
-#if ENABLE_AVX512_SIMD16
template <unsigned expnum, unsigned expden, unsigned coeffnum, unsigned coeffden>
inline static simd16scalar SIMDCALL fastpow(simd16scalar const& value)
{
// only native AVX512 can directly use the computed mask for the blend operation
result = _mm512_mask_blend_ps(mask, result2, result);
#else
- result = _simd16_blendv_ps(
+ result = _simd16_blendv_ps(
result2, result, _simd16_cmplt_ps(value, _simd16_set1_ps(0.0031308f)));
#endif
}
return result;
}
-#endif
//////////////////////////////////////////////////////////////////////////
/// TypeTraits - Format type traits specialization for FLOAT16
//////////////////////////////////////////////////////////////////////////
SWR_NOT_IMPL; // @todo
return _simd_setzero_ps();
}
-#if ENABLE_AVX512_SIMD16
static simd16scalar pack(const simd16scalar& in)
{
SWR_NOT_IMPL; // @todo
return _simd16_setzero_ps();
}
-#endif
};
//////////////////////////////////////////////////////////////////////////
#endif
return in;
}
-#if ENABLE_AVX512_SIMD16
static inline simd16scalar convertSrgb(simd16scalar& in) { return ConvertFloatToSRGB2(in); }
-#endif
};
//////////////////////////////////////////////////////////////////////////
return TypeTraits<X, NumBitsX>::fromFloat();
}
- INLINE static simdscalar loadSOA(uint32_t comp, const uint8_t* pSrc)
+ INLINE static void loadSOA(uint32_t comp, const uint8_t* pSrc, simdscalar& dst)
{
switch (comp)
{
case 0:
- return TypeTraits<X, NumBitsX>::loadSOA(pSrc);
+ dst = TypeTraits<X, NumBitsX>::loadSOA(pSrc);
+ return;
case 1:
- return TypeTraits<Y, NumBitsY>::loadSOA(pSrc);
+ dst = TypeTraits<Y, NumBitsY>::loadSOA(pSrc);
+ return;
case 2:
- return TypeTraits<Z, NumBitsZ>::loadSOA(pSrc);
+ dst = TypeTraits<Z, NumBitsZ>::loadSOA(pSrc);
+ return;
case 3:
- return TypeTraits<W, NumBitsW>::loadSOA(pSrc);
+ dst = TypeTraits<W, NumBitsW>::loadSOA(pSrc);
+ return;
}
SWR_INVALID("Invalid component: %d", comp);
- return TypeTraits<X, NumBitsX>::loadSOA(pSrc);
+ dst = TypeTraits<X, NumBitsX>::loadSOA(pSrc);
}
INLINE static void storeSOA(uint32_t comp, uint8_t* pDst, simdscalar const& src)
SWR_INVALID("Invalid component: %d", comp);
return TypeTraits<X, NumBitsX>::convertSrgb(in);
}
-#if ENABLE_AVX512_SIMD16
- INLINE static simd16scalar loadSOA_16(uint32_t comp, const uint8_t* pSrc)
+ INLINE static void SIMDCALL loadSOA(uint32_t comp, const uint8_t* pSrc, simd16scalar& dst)
{
switch (comp)
{
case 0:
- return TypeTraits<X, NumBitsX>::loadSOA_16(pSrc);
+ dst = TypeTraits<X, NumBitsX>::loadSOA_16(pSrc);
+ return;
case 1:
- return TypeTraits<Y, NumBitsY>::loadSOA_16(pSrc);
+ dst = TypeTraits<Y, NumBitsY>::loadSOA_16(pSrc);
+ return;
case 2:
- return TypeTraits<Z, NumBitsZ>::loadSOA_16(pSrc);
+ dst = TypeTraits<Z, NumBitsZ>::loadSOA_16(pSrc);
+ return;
case 3:
- return TypeTraits<W, NumBitsW>::loadSOA_16(pSrc);
+ dst = TypeTraits<W, NumBitsW>::loadSOA_16(pSrc);
+ return;
}
SWR_INVALID("Invalid component: %d", comp);
- return TypeTraits<X, NumBitsX>::loadSOA_16(pSrc);
+ dst = TypeTraits<X, NumBitsX>::loadSOA_16(pSrc);
}
INLINE static void SIMDCALL storeSOA(uint32_t comp, uint8_t* pDst, simd16scalar const& src)
SWR_INVALID("Invalid component: %d", comp);
return TypeTraits<X, NumBitsX>::convertSrgb(in);
}
-#endif
};
vDst[7] = _simd_extractf128_ps(r02r1xhihi, 1);
}
-#if ENABLE_AVX512_SIMD16
INLINE
void vTranspose4x16(simd16scalar (&dst)[4],
const simd16scalar& src0,
const simd16scalar& src3)
{
const simd16scalari perm =
- _simd16_set_epi32(15,
- 11,
- 7,
- 3,
- 14,
- 10,
- 6,
- 2,
- 13,
- 9,
- 5,
- 1,
- 12,
- 8,
- 4,
- 0); // pre-permute input to setup the right order after all the unpacking
+ _simd16_set_epi32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0);
+
+ // pre-permute input to setup the right order after all the unpacking
simd16scalar pre0 = _simd16_permute_ps(src0, perm); // r
simd16scalar pre1 = _simd16_permute_ps(src1, perm); // g
dst[3] = _simd16_unpackhi_ps(rbhi, gahi);
}
-#endif
INLINE
void vTranspose8x8(simdscalar (&vDst)[8],
const simdscalar& vMask0,
{
memcpy(pDst, pSrc, (bpp * KNOB_SIMD_WIDTH) / 8);
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
memcpy(pDst, pSrc, (bpp * KNOB_SIMD16_WIDTH) / 8);
}
-#endif
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
- simd4scalari src0 =
- SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc)); // rrrrrrrrrrrrrrrr
- simd4scalari src1 =
- SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc) + 1); // gggggggggggggggg
- simd4scalari src2 =
- SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc) + 2); // bbbbbbbbbbbbbbbb
- simd4scalari src3 =
- SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc) + 3); // aaaaaaaaaaaaaaaa
+#if KNOB_SIMD16_WIDTH == 16
+ // clang-format off
+
+ simd4scalari src0 = SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc)); // rrrrrrrrrrrrrrrr
+ simd4scalari src1 = SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc) + 1); // gggggggggggggggg
+ simd4scalari src2 = SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc) + 2); // bbbbbbbbbbbbbbbb
+ simd4scalari src3 = SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc) + 3); // aaaaaaaaaaaaaaaa
simd16scalari cvt0 = _simd16_cvtepu8_epi32(src0);
simd16scalari cvt1 = _simd16_cvtepu8_epi32(src1);
simd16scalari cvt2 = _simd16_cvtepu8_epi32(src2);
simd16scalari cvt3 = _simd16_cvtepu8_epi32(src3);
- simd16scalari shl1 = _simd16_slli_epi32(cvt1, 8);
+ simd16scalari shl1 = _simd16_slli_epi32(cvt1, 8);
simd16scalari shl2 = _simd16_slli_epi32(cvt2, 16);
simd16scalari shl3 = _simd16_slli_epi32(cvt3, 24);
simd16scalari dst = _simd16_or_si(_simd16_or_si(cvt0, shl1), _simd16_or_si(shl2, shl3));
- _simd16_store_si(reinterpret_cast<simd16scalari*>(pDst),
- dst); // rgbargbargbargbargbargbargbargbargbargbargbargbargbargbargbargba
- }
+ _simd16_store_si(reinterpret_cast<simd16scalari*>(pDst), dst); // rgbargbargbargbargbargbargbargbargbargbargbargbargbargbargbargba
+
+ // clang-format on
+#else
+#error Unsupported vector width
#endif
+ }
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
INLINE static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
- simd4scalari src0 =
- SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc)); // rrrrrrrrrrrrrrrr
- simd4scalari src1 =
- SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc) + 1); // gggggggggggggggg
+#if KNOB_SIMD16_WIDTH == 16
+ // clang-format off
+
+ simd4scalari src0 = SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc)); // rrrrrrrrrrrrrrrr
+ simd4scalari src1 = SIMD128::load_si(reinterpret_cast<const simd4scalari*>(pSrc) + 1); // gggggggggggggggg
simdscalari cvt0 = _simd_cvtepu8_epi16(src0);
simdscalari cvt1 = _simd_cvtepu8_epi16(src1);
simdscalari dst = _simd_or_si(cvt0, shl1);
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst),
- dst); // rgrgrgrgrgrgrgrgrgrgrgrgrgrgrgrg
- }
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst), dst); // rgrgrgrgrgrgrgrgrgrgrgrgrgrgrgrg
+
+ // clang-format on
+#else
+#error Unsupported vector width
#endif
+ }
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
+#if KNOB_SIMD16_WIDTH == 16
+ // clang-format off
+
simd16scalar src0 = _simd16_load_ps(reinterpret_cast<const float*>(pSrc));
simd16scalar src1 = _simd16_load_ps(reinterpret_cast<const float*>(pSrc) + 16);
simd16scalar src2 = _simd16_load_ps(reinterpret_cast<const float*>(pSrc) + 32);
vTranspose4x16(dst, src0, src1, src2, src3);
- _simd16_store_ps(reinterpret_cast<float*>(pDst) + 0, dst[0]);
+ _simd16_store_ps(reinterpret_cast<float*>(pDst) + 0, dst[0]);
_simd16_store_ps(reinterpret_cast<float*>(pDst) + 16, dst[1]);
_simd16_store_ps(reinterpret_cast<float*>(pDst) + 32, dst[2]);
_simd16_store_ps(reinterpret_cast<float*>(pDst) + 48, dst[3]);
- }
+
+ // clang-format on
+#else
+#error Unsupported vector width
#endif
+ }
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
+#if KNOB_SIMD16_WIDTH == 16
+ // clang-format off
+
simd16scalar src0 = _simd16_load_ps(reinterpret_cast<const float*>(pSrc));
simd16scalar src1 = _simd16_load_ps(reinterpret_cast<const float*>(pSrc) + 16);
simd16scalar src2 = _simd16_load_ps(reinterpret_cast<const float*>(pSrc) + 32);
vTranspose4x16(dst, src0, src1, src2, src3);
- _simd16_store_ps(reinterpret_cast<float*>(pDst) + 0, dst[0]);
+ _simd16_store_ps(reinterpret_cast<float*>(pDst) + 0, dst[0]);
_simd16_store_ps(reinterpret_cast<float*>(pDst) + 16, dst[1]);
_simd16_store_ps(reinterpret_cast<float*>(pDst) + 32, dst[2]);
_simd16_store_ps(reinterpret_cast<float*>(pDst) + 48, dst[3]);
- }
+
+ // clang-format on
+#else
+#error Unsupported vector width
#endif
+ }
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
- simd16scalar src0 =
- _simd16_load_ps(reinterpret_cast<const float*>(pSrc)); // rrrrrrrrrrrrrrrr
- simd16scalar src1 =
- _simd16_load_ps(reinterpret_cast<const float*>(pSrc) + 16); // gggggggggggggggg
-
- simd16scalar tmp0 =
- _simd16_unpacklo_ps(src0, src1); // r0 g0 r1 g1 r4 g4 r5 g5 r8 g8 r9 g9 rC gC rD gD
- simd16scalar tmp1 =
- _simd16_unpackhi_ps(src0, src1); // r2 g2 r3 g3 r6 g6 r7 g7 rA gA rB gB rE gE rF gF
-
- simd16scalar per0 = _simd16_permute2f128_ps(
- tmp0,
- tmp1,
- 0x44); // (1, 0, 1, 0) // r0 g0 r1 g1 r4 g4 r5 g5 r2 g2 r3 g3 r6 g6 r7 g7
- simd16scalar per1 = _simd16_permute2f128_ps(
- tmp0,
- tmp1,
- 0xEE); // (3, 2, 3, 2) // r8 g8 r9 g9 rC gC rD gD rA gA rB gB rE gE rF gF
-
- simd16scalar dst0 = _simd16_permute2f128_ps(
- per0,
- per0,
- 0xD8); // (3, 1, 2, 0) // r0 g0 r1 g1 r2 g2 r3 g3 r4 g4 r5 g5 r6 g6 r7 g7
- simd16scalar dst1 = _simd16_permute2f128_ps(
- per1,
- per1,
- 0xD8); // (3, 1, 2, 0) // r8 g8 r9 g9 rA gA rB gB rC gC rD gD rE gE rF gF
-
- _simd16_store_ps(reinterpret_cast<float*>(pDst) + 0, dst0); // rgrgrgrgrgrgrgrg
- _simd16_store_ps(reinterpret_cast<float*>(pDst) + 16, dst1); // rgrgrgrgrgrgrgrg
- }
+#if KNOB_SIMD16_WIDTH == 16
+ // clang-format off
+
+ simd16scalar src0 = _simd16_load_ps(reinterpret_cast<const float*>(pSrc)); // rrrrrrrrrrrrrrrr
+ simd16scalar src1 = _simd16_load_ps(reinterpret_cast<const float*>(pSrc) + 16); // gggggggggggggggg
+
+ simd16scalar tmp0 = _simd16_unpacklo_ps(src0, src1); // r0 g0 r1 g1 r4 g4 r5 g5 r8 g8 r9 g9 rC gC rD gD
+ simd16scalar tmp1 = _simd16_unpackhi_ps(src0, src1); // r2 g2 r3 g3 r6 g6 r7 g7 rA gA rB gB rE gE rF gF
+
+ simd16scalar per0 = _simd16_permute2f128_ps(tmp0, tmp1, 0x44); // (1, 0, 1, 0) // r0 g0 r1 g1 r4 g4 r5 g5 r2 g2 r3 g3 r6 g6 r7 g7
+ simd16scalar per1 = _simd16_permute2f128_ps(tmp0, tmp1, 0xEE); // (3, 2, 3, 2) // r8 g8 r9 g9 rC gC rD gD rA gA rB gB rE gE rF gF
+
+ simd16scalar dst0 = _simd16_permute2f128_ps(per0, per0, 0xD8); // (3, 1, 2, 0) // r0 g0 r1 g1 r2 g2 r3 g3 r4 g4 r5 g5 r6 g6 r7 g7
+ simd16scalar dst1 = _simd16_permute2f128_ps(per1, per1, 0xD8); // (3, 1, 2, 0) // r8 g8 r9 g9 rA gA rB gB rC gC rD gD rE gE rF gF
+
+ _simd16_store_ps(reinterpret_cast<float*>(pDst) + 0, dst0); // rgrgrgrgrgrgrgrg
+ _simd16_store_ps(reinterpret_cast<float*>(pDst) + 16, dst1); // rgrgrgrgrgrgrgrg
+
+ // clang-format on
+#else
+#error Unsupported vector width
#endif
+ }
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
- simdscalari src0 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc)); // rrrrrrrrrrrrrrrr
- simdscalari src1 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 1); // gggggggggggggggg
- simdscalari src2 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 2); // bbbbbbbbbbbbbbbb
- simdscalari src3 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 3); // aaaaaaaaaaaaaaaa
-
- simdscalari pre0 = _simd_unpacklo_epi16(src0, src1); // rg0 rg1 rg2 rg3 rg8 rg9 rgA rgB
- simdscalari pre1 = _simd_unpackhi_epi16(src0, src1); // rg4 rg5 rg6 rg7 rgC rgD rgE rgF
- simdscalari pre2 = _simd_unpacklo_epi16(src2, src3); // ba0 ba1 ba3 ba3 ba8 ba9 baA baB
- simdscalari pre3 = _simd_unpackhi_epi16(src2, src3); // ba4 ba5 ba6 ba7 baC baD baE baF
-
- simdscalari tmp0 = _simd_unpacklo_epi32(pre0, pre2); // rbga0 rbga1 rbga8 rbga9
- simdscalari tmp1 = _simd_unpackhi_epi32(pre0, pre2); // rbga2 rbga3 rbgaA rbgaB
- simdscalari tmp2 = _simd_unpacklo_epi32(pre1, pre3); // rbga4 rbga5 rgbaC rbgaD
- simdscalari tmp3 = _simd_unpackhi_epi32(pre1, pre3); // rbga6 rbga7 rbgaE rbgaF
-
- simdscalari dst0 = _simd_permute2f128_si(
- tmp0, tmp1, 0x20); // (2, 0) // rbga0 rbga1 rbga2 rbga3
- simdscalari dst1 = _simd_permute2f128_si(
- tmp2, tmp3, 0x20); // (2, 0) // rbga4 rbga5 rbga6 rbga7
- simdscalari dst2 = _simd_permute2f128_si(
- tmp0, tmp1, 0x31); // (3, 1) // rbga8 rbga9 rbgaA rbgaB
- simdscalari dst3 = _simd_permute2f128_si(
- tmp2, tmp3, 0x31); // (3, 1) // rbgaC rbgaD rbgaE rbgaF
-
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 0, dst0); // rgbargbargbargba
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 1, dst1); // rgbargbargbargba
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 2, dst2); // rgbargbargbargba
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 3, dst3); // rgbargbargbargba
- }
+#if KNOB_SIMD16_WIDTH == 16
+ // clang-format off
+
+ simdscalari src0 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc)); // rrrrrrrrrrrrrrrr
+ simdscalari src1 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 1); // gggggggggggggggg
+ simdscalari src2 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 2); // bbbbbbbbbbbbbbbb
+ simdscalari src3 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 3); // aaaaaaaaaaaaaaaa
+
+ simdscalari pre0 = _simd_unpacklo_epi16(src0, src1); // rg0 rg1 rg2 rg3 rg8 rg9 rgA rgB
+ simdscalari pre1 = _simd_unpackhi_epi16(src0, src1); // rg4 rg5 rg6 rg7 rgC rgD rgE rgF
+        simdscalari pre2 = _simd_unpacklo_epi16(src2, src3); // ba0 ba1 ba2 ba3 ba8 ba9 baA baB
+ simdscalari pre3 = _simd_unpackhi_epi16(src2, src3); // ba4 ba5 ba6 ba7 baC baD baE baF
+
+ simdscalari tmp0 = _simd_unpacklo_epi32(pre0, pre2); // rbga0 rbga1 rbga8 rbga9
+ simdscalari tmp1 = _simd_unpackhi_epi32(pre0, pre2); // rbga2 rbga3 rbgaA rbgaB
+        simdscalari tmp2 = _simd_unpacklo_epi32(pre1, pre3); // rbga4 rbga5 rbgaC rbgaD
+ simdscalari tmp3 = _simd_unpackhi_epi32(pre1, pre3); // rbga6 rbga7 rbgaE rbgaF
+
+ simdscalari dst0 = _simd_permute2f128_si(tmp0, tmp1, 0x20); // (2, 0) // rbga0 rbga1 rbga2 rbga3
+ simdscalari dst1 = _simd_permute2f128_si(tmp2, tmp3, 0x20); // (2, 0) // rbga4 rbga5 rbga6 rbga7
+ simdscalari dst2 = _simd_permute2f128_si(tmp0, tmp1, 0x31); // (3, 1) // rbga8 rbga9 rbgaA rbgaB
+ simdscalari dst3 = _simd_permute2f128_si(tmp2, tmp3, 0x31); // (3, 1) // rbgaC rbgaD rbgaE rbgaF
+
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 0, dst0); // rgbargbargbargba
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 1, dst1); // rgbargbargbargba
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 2, dst2); // rgbargbargbargba
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 3, dst3); // rgbargbargbargba
+
+ // clang-format on
+#else
+#error Unsupported vector width
#endif
+ }
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
- simdscalari src0 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc)); // rrrrrrrrrrrrrrrr
- simdscalari src1 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 1); // gggggggggggggggg
- simdscalari src2 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 2); // bbbbbbbbbbbbbbbb
- simdscalari src3 = _simd_setzero_si(); // aaaaaaaaaaaaaaaa
-
- simdscalari pre0 = _simd_unpacklo_epi16(src0, src1); // rg0 rg1 rg2 rg3 rg8 rg9 rgA rgB
- simdscalari pre1 = _simd_unpackhi_epi16(src0, src1); // rg4 rg5 rg6 rg7 rgC rgD rgE rgF
- simdscalari pre2 = _simd_unpacklo_epi16(src2, src3); // ba0 ba1 ba3 ba3 ba8 ba9 baA baB
- simdscalari pre3 = _simd_unpackhi_epi16(src2, src3); // ba4 ba5 ba6 ba7 baC baD baE baF
-
- simdscalari tmp0 = _simd_unpacklo_epi32(pre0, pre2); // rbga0 rbga1 rbga8 rbga9
- simdscalari tmp1 = _simd_unpackhi_epi32(pre0, pre2); // rbga2 rbga3 rbgaA rbgaB
- simdscalari tmp2 = _simd_unpacklo_epi32(pre1, pre3); // rbga4 rbga5 rgbaC rbgaD
- simdscalari tmp3 = _simd_unpackhi_epi32(pre1, pre3); // rbga6 rbga7 rbgaE rbgaF
-
- simdscalari dst0 = _simd_permute2f128_si(
- tmp0, tmp1, 0x20); // (2, 0) // rbga0 rbga1 rbga2 rbga3
- simdscalari dst1 = _simd_permute2f128_si(
- tmp2, tmp3, 0x20); // (2, 0) // rbga4 rbga5 rbga6 rbga7
- simdscalari dst2 = _simd_permute2f128_si(
- tmp0, tmp1, 0x31); // (3, 1) // rbga8 rbga9 rbgaA rbgaB
- simdscalari dst3 = _simd_permute2f128_si(
- tmp2, tmp3, 0x31); // (3, 1) // rbgaC rbgaD rbgaE rbgaF
-
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 0, dst0); // rgbargbargbargba
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 1, dst1); // rgbargbargbargba
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 2, dst2); // rgbargbargbargba
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 3, dst3); // rgbargbargbargba
- }
+#if KNOB_SIMD16_WIDTH == 16
+ // clang-format off
+
+ simdscalari src0 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc)); // rrrrrrrrrrrrrrrr
+ simdscalari src1 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 1); // gggggggggggggggg
+ simdscalari src2 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 2); // bbbbbbbbbbbbbbbb
+ simdscalari src3 = _simd_setzero_si(); // aaaaaaaaaaaaaaaa
+
+ simdscalari pre0 = _simd_unpacklo_epi16(src0, src1); // rg0 rg1 rg2 rg3 rg8 rg9 rgA rgB
+ simdscalari pre1 = _simd_unpackhi_epi16(src0, src1); // rg4 rg5 rg6 rg7 rgC rgD rgE rgF
+        simdscalari pre2 = _simd_unpacklo_epi16(src2, src3); // ba0 ba1 ba2 ba3 ba8 ba9 baA baB
+ simdscalari pre3 = _simd_unpackhi_epi16(src2, src3); // ba4 ba5 ba6 ba7 baC baD baE baF
+
+ simdscalari tmp0 = _simd_unpacklo_epi32(pre0, pre2); // rbga0 rbga1 rbga8 rbga9
+ simdscalari tmp1 = _simd_unpackhi_epi32(pre0, pre2); // rbga2 rbga3 rbgaA rbgaB
+        simdscalari tmp2 = _simd_unpacklo_epi32(pre1, pre3); // rbga4 rbga5 rbgaC rbgaD
+ simdscalari tmp3 = _simd_unpackhi_epi32(pre1, pre3); // rbga6 rbga7 rbgaE rbgaF
+
+ simdscalari dst0 = _simd_permute2f128_si(tmp0, tmp1, 0x20); // (2, 0) // rbga0 rbga1 rbga2 rbga3
+ simdscalari dst1 = _simd_permute2f128_si(tmp2, tmp3, 0x20); // (2, 0) // rbga4 rbga5 rbga6 rbga7
+ simdscalari dst2 = _simd_permute2f128_si(tmp0, tmp1, 0x31); // (3, 1) // rbga8 rbga9 rbgaA rbgaB
+ simdscalari dst3 = _simd_permute2f128_si(tmp2, tmp3, 0x31); // (3, 1) // rbgaC rbgaD rbgaE rbgaF
+
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 0, dst0); // rgbargbargbargba
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 1, dst1); // rgbargbargbargba
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 2, dst2); // rgbargbargbargba
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 3, dst3); // rgbargbargbargba
+
+ // clang-format on
+#else
+#error Unsupported vector width
#endif
+ }
};
//////////////////////////////////////////////////////////////////////////
#error Unsupported vector width
#endif
}
-#if ENABLE_AVX512_SIMD16
- INLINE static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst)
+ INLINE static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst)
{
- simdscalari src0 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc)); // rrrrrrrrrrrrrrrr
- simdscalari src1 =
- _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 1); // gggggggggggggggg
+#if KNOB_SIMD16_WIDTH == 16
+ // clang-format off
- simdscalari tmp0 = _simd_unpacklo_epi16(src0, src1); // rg0 rg1 rg2 rg3 rg8 rg9 rgA rgB
- simdscalari tmp1 = _simd_unpackhi_epi16(src0, src1); // rg4 rg5 rg6 rg7 rgC rgD rgE rgF
+ simdscalari src0 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc)); // rrrrrrrrrrrrrrrr
+ simdscalari src1 = _simd_load_si(reinterpret_cast<const simdscalari*>(pSrc) + 1); // gggggggggggggggg
- simdscalari dst0 = _simd_permute2f128_si(
- tmp0, tmp1, 0x20); // (2, 0) // rg0 rg1 rg2 rg3 rg4 rg5 rg6 rg7
- simdscalari dst1 = _simd_permute2f128_si(
- tmp0, tmp1, 0x31); // (3, 1) // rg8 rg9 rgA rgB rgC rgD rgE rgF
+ simdscalari tmp0 = _simd_unpacklo_epi16(src0, src1); // rg0 rg1 rg2 rg3 rg8 rg9 rgA rgB
+ simdscalari tmp1 = _simd_unpackhi_epi16(src0, src1); // rg4 rg5 rg6 rg7 rgC rgD rgE rgF
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 0, dst0); // rgrgrgrgrgrgrgrg
- _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 1, dst1); // rgrgrgrgrgrgrgrg
- }
+ simdscalari dst0 = _simd_permute2f128_si(tmp0, tmp1, 0x20); // (2, 0) // rg0 rg1 rg2 rg3 rg4 rg5 rg6 rg7
+ simdscalari dst1 = _simd_permute2f128_si(tmp0, tmp1, 0x31); // (3, 1) // rg8 rg9 rgA rgB rgC rgD rgE rgF
+
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 0, dst0); // rgrgrgrgrgrgrgrg
+ _simd_store_si(reinterpret_cast<simdscalari*>(pDst) + 1, dst1); // rgrgrgrgrgrgrgrg
+
+ // clang-format on
+#else
+#error Unsupported vector width
#endif
+ }
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
//////////////////////////////////////////////////////////////////////////
/// @param pSrc - source data in SOA form
/// @param pDst - output data in AOS form
static void Transpose(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#if ENABLE_AVX512_SIMD16
-
- static void Transpose_16(const uint8_t* pSrc, uint8_t* pDst) = delete;
-#endif
+ static void Transpose_simd16(const uint8_t* pSrc, uint8_t* pDst) = delete;
};
#if (KNOB_ARCH == KNOB_ARCH_AVX)
#define KNOB_ARCH_ISA AVX
#define KNOB_ARCH_STR "AVX"
-#define KNOB_SIMD_WIDTH 8
-#define KNOB_SIMD_BYTES 32
#elif (KNOB_ARCH == KNOB_ARCH_AVX2)
#define KNOB_ARCH_ISA AVX2
#define KNOB_ARCH_STR "AVX2"
-#define KNOB_SIMD_WIDTH 8
-#define KNOB_SIMD_BYTES 32
#elif (KNOB_ARCH == KNOB_ARCH_AVX512)
#define KNOB_ARCH_ISA AVX512F
#define KNOB_ARCH_STR "AVX512"
-#define KNOB_SIMD_WIDTH 8
-#define KNOB_SIMD_BYTES 32
#else
#error "Unknown architecture"
#endif
-#if ENABLE_AVX512_SIMD16
+#define KNOB_SIMD_WIDTH 8
+#define KNOB_SIMD_BYTES 32
#define KNOB_SIMD16_WIDTH 16
#define KNOB_SIMD16_BYTES 64
-#if (KNOB_ARCH == KNOB_ARCH_AVX512)
-#define ENABLE_AVX512_EMULATION 0
-#else
-#define ENABLE_AVX512_EMULATION 1
-#endif
-
-#endif
-
#define MAX_KNOB_ARCH_STR_LEN sizeof("AVX512_PLUS_PADDING")
///////////////////////////////////////////////////////////////////////////////
simdvector attrib[SWR_VTX_NUM_SLOTS];
};
-#if ENABLE_AVX512_SIMD16
struct simd16vertex
{
simd16vector attrib[SWR_VTX_NUM_SLOTS];
};
-#endif
-
template <typename SIMD_T>
struct SIMDVERTEX_T
{
// enums
enum SWR_TILE_MODE
{
- SWR_TILE_NONE = 0x0, // Linear mode (no tiling)
- SWR_TILE_MODE_WMAJOR, // W major tiling
- SWR_TILE_MODE_XMAJOR, // X major tiling
- SWR_TILE_MODE_YMAJOR, // Y major tiling
- SWR_TILE_SWRZ, // SWR-Z tiling
+ SWR_TILE_NONE = 0x0, // Linear mode (no tiling)
+ SWR_TILE_MODE_WMAJOR, // W major tiling
+ SWR_TILE_MODE_XMAJOR, // X major tiling
+ SWR_TILE_MODE_YMAJOR, // Y major tiling
+ SWR_TILE_SWRZ, // SWR-Z tiling
+
SWR_TILE_MODE_COUNT
};
simdscalari xmax;
};
-#if ENABLE_AVX512_SIMD16
struct simd16BBox
{
simd16scalari ymin;
simd16scalari xmin;
simd16scalari xmax;
};
-#endif
template <typename SIMD_T>
struct SIMDBBOX_T
StoreSOA<DstFormat>(src, soaTile);
// Convert from SOA --> AOS
- FormatTraits<DstFormat>::TransposeT::Transpose_16(soaTile, aosTile);
+ FormatTraits<DstFormat>::TransposeT::Transpose_simd16(soaTile, aosTile);
// Store data into destination
StorePixels<FormatTraits<DstFormat>::bpp, NumDests>::Store(aosTile, ppDsts);
OSALIGNSIMD16(uint8_t) aosTile[MAX_RASTER_TILE_BYTES];
// Convert from SOA --> AOS
- FormatTraits<Format>::TransposeT::Transpose_16(pSrc, aosTile);
+ FormatTraits<Format>::TransposeT::Transpose_simd16(pSrc, aosTile);
// Store data into destination
StorePixels<FormatTraits<Format>::bpp, NumDests>::Store(aosTile, ppDsts);
}
};
-#if ENABLE_AVX512_SIMD16
//////////////////////////////////////////////////////////////////////////
/// SimdTile 8x2 for AVX-512
//////////////////////////////////////////////////////////////////////////
}
};
-#endif
//////////////////////////////////////////////////////////////////////////
/// @brief Computes lod offset for 1D surface at specified lod.
/// @param baseWidth - width of basemip (mip 0).