CONVERT_SFIXED,
};
-#if USE_SIMD16_SHADERS
-#define USE_SIMD16_GATHERS 0
-#endif
-
//////////////////////////////////////////////////////////////////////////
/// Interface to Jitting a fetch shader
//////////////////////////////////////////////////////////////////////////
uint32_t&, uint32_t&, const ComponentEnable, const ComponentControl(&)[4], Value*(&)[4],
const uint32_t(&)[4]> Shuffle8bpcArgs;
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
void Shuffle8bpcGatherd16(Shuffle8bpcArgs &args);
-#else
- void Shuffle8bpcGatherd(Shuffle8bpcArgs &args, bool useVertexID2);
-#endif
-#else
void Shuffle8bpcGatherd(Shuffle8bpcArgs &args);
-#endif
typedef std::tuple<Value*(&)[2], Value*, const Instruction::CastOps, const ConversionType,
uint32_t&, uint32_t&, const ComponentEnable, const ComponentControl(&)[4], Value*(&)[4]> Shuffle16bpcArgs;
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
void Shuffle16bpcGather16(Shuffle16bpcArgs &args);
-#else
- void Shuffle16bpcGather(Shuffle16bpcArgs &args, bool useVertexID2);
-#endif
-#else
void Shuffle16bpcGather(Shuffle16bpcArgs &args);
-#endif
-#if USE_SIMD16_GATHERS
- void StoreVertexElements16(Value* pVtxOut, const uint32_t outputElt, const uint32_t numEltsToStore, Value* (&vVertexElements)[4]);
-#else
void StoreVertexElements(Value* pVtxOut, const uint32_t outputElt, const uint32_t numEltsToStore, Value* (&vVertexElements)[4]);
-#endif
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
- Value *GenerateCompCtrlVector16(const ComponentControl ctrl);
-#else
- Value *GenerateCompCtrlVector(const ComponentControl ctrl, bool useVertexID2);
-#endif
-#else
Value *GenerateCompCtrlVector(const ComponentControl ctrl);
-#endif
-
- void JitLoadVertices(const FETCH_COMPILE_STATE &fetchState, Value* streams, Value* vIndices, Value* pVtxOut);
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
- void JitGatherVertices(const FETCH_COMPILE_STATE &fetchState, Value *streams, Value *vIndices, Value *vIndices2, Value *pVtxOut, bool useVertexID2);
-#else
- void JitGatherVertices(const FETCH_COMPILE_STATE &fetchState, Value* streams, Value* vIndices, Value* pVtxOut, bool useVertexID2);
-#endif
-#else
void JitGatherVertices(const FETCH_COMPILE_STATE &fetchState, Value* streams, Value* vIndices, Value* pVtxOut);
-#endif
bool IsOddFormat(SWR_FORMAT format);
bool IsUniformFormat(SWR_FORMAT format);
Value* pVtxOut = &*argitr;
pVtxOut->setName("vtxOutput");
+ uint32_t baseWidth = mVWidth;
+
+ SWR_ASSERT(mVWidth == 8 || mVWidth == 16, "Unsupported vector width %d", mVWidth);
+
+ // Override the builder's target width so SIMD-width-relative types and intrinsics emit 16-wide code
#if USE_SIMD16_SHADERS
-#if 0// USE_SIMD16_BUILDER
- pVtxOut = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-#else
- pVtxOut = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth), 0));
-#endif
-#else
- pVtxOut = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth), 0));
+ SetTargetWidth(16);
#endif
+ pVtxOut = BITCAST(pVtxOut, PointerType::get(mSimdFP32Ty, 0));
+
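// Illustrative sketch, not taken from the SWR sources: the override/restore pair used here
// (SetTargetWidth(16) above, SetTargetWidth(baseWidth) before the return) could be wrapped in a
// small RAII guard so an early return cannot leave the builder 16-wide. 'ScopedTargetWidth' is a
// hypothetical helper shown with a plain integer stand-in for the builder state.
struct ScopedTargetWidth
{
    uint32_t& width;   // builder's current target width
    uint32_t  saved;   // width to restore on scope exit
    ScopedTargetWidth(uint32_t& w, uint32_t newWidth) : width(w), saved(w) { width = newWidth; }
    ~ScopedTargetWidth() { width = saved; }
};
// usage sketch: { ScopedTargetWidth guard(widthState, 16); /* emit 16-wide IR */ } // restored here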
// SWR_FETCH_CONTEXT::pStreams
Value* streams = LOAD(mpFetchInfo,{0, SWR_FETCH_CONTEXT_pStreams});
streams->setName("pStreams");
// SWR_FETCH_CONTEXT::pLastIndex
Value* pLastIndex = LOAD(mpFetchInfo,{0, SWR_FETCH_CONTEXT_pLastIndex});
pLastIndex->setName("pLastIndex");
-
Value* vIndices;
-#if USE_SIMD16_SHADERS
- Value* indices2;
- Value* vIndices2;
-#endif
switch(fetchState.indexType)
{
case R8_UINT:
indices = BITCAST(indices, Type::getInt8PtrTy(JM()->mContext, 0));
-#if USE_SIMD16_SHADERS
- indices2 = GEP(indices, C(8));
-#endif
if(fetchState.bDisableIndexOOBCheck)
{
vIndices = LOAD(BITCAST(indices, PointerType::get(VectorType::get(mInt8Ty, mpJitMgr->mVWidth), 0)), {(uint32_t)0});
vIndices = Z_EXT(vIndices, mSimdInt32Ty);
-#if USE_SIMD16_SHADERS
- vIndices2 = LOAD(BITCAST(indices2, PointerType::get(VectorType::get(mInt8Ty, mpJitMgr->mVWidth), 0)), { (uint32_t)0 });
- vIndices2 = Z_EXT(vIndices2, mSimdInt32Ty);
-#endif
}
else
{
pLastIndex = BITCAST(pLastIndex, Type::getInt8PtrTy(JM()->mContext, 0));
vIndices = GetSimdValid8bitIndices(indices, pLastIndex);
-#if USE_SIMD16_SHADERS
- pLastIndex = BITCAST(pLastIndex, Type::getInt8PtrTy(JM()->mContext, 0));
- vIndices2 = GetSimdValid8bitIndices(indices2, pLastIndex);
-#endif
}
break;
case R16_UINT:
indices = BITCAST(indices, Type::getInt16PtrTy(JM()->mContext, 0));
-#if USE_SIMD16_SHADERS
- indices2 = GEP(indices, C(8));
-#endif
if(fetchState.bDisableIndexOOBCheck)
{
vIndices = LOAD(BITCAST(indices, PointerType::get(VectorType::get(mInt16Ty, mpJitMgr->mVWidth), 0)), {(uint32_t)0});
vIndices = Z_EXT(vIndices, mSimdInt32Ty);
-#if USE_SIMD16_SHADERS
- vIndices2 = LOAD(BITCAST(indices2, PointerType::get(VectorType::get(mInt16Ty, mpJitMgr->mVWidth), 0)), { (uint32_t)0 });
- vIndices2 = Z_EXT(vIndices2, mSimdInt32Ty);
-#endif
}
else
{
pLastIndex = BITCAST(pLastIndex, Type::getInt16PtrTy(JM()->mContext, 0));
vIndices = GetSimdValid16bitIndices(indices, pLastIndex);
-#if USE_SIMD16_SHADERS
- pLastIndex = BITCAST(pLastIndex, Type::getInt16PtrTy(JM()->mContext, 0));
- vIndices2 = GetSimdValid16bitIndices(indices2, pLastIndex);
-#endif
}
break;
case R32_UINT:
-#if USE_SIMD16_SHADERS
- indices2 = GEP(indices, C(8));
-#endif
(fetchState.bDisableIndexOOBCheck) ? vIndices = LOAD(BITCAST(indices, PointerType::get(mSimdInt32Ty,0)),{(uint32_t)0})
: vIndices = GetSimdValid32bitIndices(indices, pLastIndex);
-#if USE_SIMD16_SHADERS
- (fetchState.bDisableIndexOOBCheck) ? vIndices2 = LOAD(BITCAST(indices2, PointerType::get(mSimdInt32Ty, 0)), { (uint32_t)0 })
- : vIndices2 = GetSimdValid32bitIndices(indices2, pLastIndex);
-#endif
break; // incoming type is already 32bit int
default:
SWR_INVALID("Unsupported index type");
vIndices = nullptr;
-#if USE_SIMD16_SHADERS
- vIndices2 = nullptr;
-#endif
break;
}
if(fetchState.bForceSequentialAccessEnable)
{
- Value* pOffsets = C({ 0, 1, 2, 3, 4, 5, 6, 7 });
+ Value* pOffsets = mVWidth == 8 ? C({ 0, 1, 2, 3, 4, 5, 6, 7 }) :
+ C({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 });
// VertexData buffers are accessed sequentially; the index is equal to the vertex number
vIndices = VBROADCAST(LOAD(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_StartVertex }));
vIndices = ADD(vIndices, pOffsets);
-#if USE_SIMD16_SHADERS
- vIndices2 = ADD(vIndices, VIMMED1(8));
-#endif
}
Value* vVertexId = vIndices;
-#if USE_SIMD16_SHADERS
- Value* vVertexId2 = vIndices2;
-#endif
if (fetchState.bVertexIDOffsetEnable)
{
// Assuming one of baseVertex or startVertex is 0, adding both should be functionally correct
Value* vStartVertex = VBROADCAST(LOAD(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_StartVertex }));
vVertexId = ADD(vIndices, vBaseVertex);
vVertexId = ADD(vVertexId, vStartVertex);
-#if USE_SIMD16_SHADERS
- vVertexId2 = ADD(vIndices2, vBaseVertex);
- vVertexId2 = ADD(vVertexId2, vStartVertex);
-#endif
}
// store out vertex IDs
- STORE(vVertexId, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID }));
-#if USE_SIMD16_SHADERS
- STORE(vVertexId2, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 }));
-#endif
+ if (mVWidth == 16)
+ {
+ // store out in simd8 halves until core supports 16-wide natively
+ auto vVertexIdLo = EXTRACT_16(vVertexId, 0);
+ auto vVertexIdHi = EXTRACT_16(vVertexId, 1);
+ STORE(vVertexIdLo, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID }));
+ STORE(vVertexIdHi, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 }));
+ }
+ else if (mVWidth == 8)
+ {
+ STORE(vVertexId, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID }));
+ }
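// Illustrative sketch, not taken from the SWR sources: scalar model of the EXTRACT_16 / JOIN_16
// helpers used above. A 16-wide value is treated as two simd8 halves (lanes 0-7 and 8-15), which
// is what lets the fetch shader feed a core that still consumes simd8 vertex IDs. The array-based
// helpers below are assumptions for illustration, not builder APIs.
static void Extract16(const float (&src)[16], float (&dst)[8], int half)
{
    for (int lane = 0; lane < 8; ++lane)
        dst[lane] = src[half * 8 + lane];   // half 0 -> lanes 0-7, half 1 -> lanes 8-15
}
static void Join16(const float (&lo)[8], const float (&hi)[8], float (&dst)[16])
{
    for (int lane = 0; lane < 8; ++lane)
    {
        dst[lane]     = lo[lane];
        dst[lane + 8] = hi[lane];
    }
}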
// store out cut mask if enabled
if (fetchState.bEnableCutIndex)
{
Value* vCutIndex = VIMMED1(fetchState.cutIndex);
Value* cutMask = VMASK(ICMP_EQ(vIndices, vCutIndex));
- STORE(cutMask, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask }));
-#if USE_SIMD16_SHADERS
- Value* cutMask2 = VMASK(ICMP_EQ(vIndices2, vCutIndex));
- STORE(cutMask2, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask2 }));
-#endif
+
+ if (mVWidth == 16)
+ {
+ auto cutMaskLo = EXTRACT_16(cutMask, 0);
+ auto cutMaskHi = EXTRACT_16(cutMask, 1);
+ STORE(cutMaskLo, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask }));
+ STORE(cutMaskHi, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask2 }));
+ }
+ else if (mVWidth == 8)
+ {
+ STORE(cutMask, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask }));
+ }
}
// Fetch attributes from memory and output to a simdvertex struct
// since VGATHER has a perf penalty on HSW vs BDW, allow client to choose which fetch method to use
-#if USE_SIMD16_SHADERS
- if (fetchState.bDisableVGATHER)
- {
- JitLoadVertices(fetchState, streams, vIndices, pVtxOut);
- JitLoadVertices(fetchState, streams, vIndices2, GEP(pVtxOut, C(1)));
- }
- else
- {
-#if USE_SIMD16_GATHERS
- JitGatherVertices(fetchState, streams, vIndices, vIndices2, pVtxOut, false);
-#else
- JitGatherVertices(fetchState, streams, vIndices, pVtxOut, false);
- JitGatherVertices(fetchState, streams, vIndices2, GEP(pVtxOut, C(1)), true);
-#endif
- }
-#else
- (fetchState.bDisableVGATHER) ? JitLoadVertices(fetchState, streams, vIndices, pVtxOut)
- : JitGatherVertices(fetchState, streams, vIndices, pVtxOut);
-#endif
+ JitGatherVertices(fetchState, streams, vIndices, pVtxOut);
RET_VOID();
JitManager::DumpToFile(fetch, "opt");
- return fetch;
-}
-
-//////////////////////////////////////////////////////////////////////////
-/// @brief Loads attributes from memory using LOADs, shuffling the
-/// components into SOA form.
-/// *Note* currently does not support component control,
-/// component packing, instancing
-/// @param fetchState - info about attributes to be fetched from memory
-/// @param streams - value pointer to the current vertex stream
-/// @param vIndices - vector value of indices to load
-/// @param pVtxOut - value pointer to output simdvertex struct
-void FetchJit::JitLoadVertices(const FETCH_COMPILE_STATE &fetchState, Value* streams, Value* vIndices, Value* pVtxOut)
-{
- // Zack shuffles; a variant of the Charleston.
-
- std::vector<Value*> vectors(16);
- std::vector<Constant*> pMask(mVWidth);
- for(uint32_t i = 0; i < mVWidth; ++i)
- {
- pMask[i] = (C(i < 4 ? i : 4));
- }
- Constant* promoteMask = ConstantVector::get(pMask);
- Constant* uwvec = UndefValue::get(VectorType::get(mFP32Ty, 4));
-
- Value* startVertex = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_StartVertex});
- Value* startInstance = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_StartInstance});
- Value* curInstance = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_CurInstance});
- Value* vBaseVertex = VBROADCAST(LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_BaseVertex}));
- curInstance->setName("curInstance");
-
- for(uint32_t nelt = 0; nelt < fetchState.numAttribs; ++nelt)
- {
- Value* elements[4] = {0};
- const INPUT_ELEMENT_DESC& ied = fetchState.layout[nelt];
- const SWR_FORMAT_INFO &info = GetFormatInfo((SWR_FORMAT)ied.Format);
- SWR_ASSERT((info.bpp != 0), "Unsupported format in JitLoadVertices.");
- uint32_t numComponents = info.numComps;
- uint32_t bpc = info.bpp / info.numComps; ///@todo Code below assumes all components are same size. Need to fix.
-
- // load path doesn't support component packing
- SWR_ASSERT(ied.ComponentPacking == ComponentEnable::XYZW, "Fetch load path doesn't support component packing.");
-
- vectors.clear();
-
- if (fetchState.bInstanceIDOffsetEnable)
- {
- SWR_ASSERT((0), "TODO: Fill out more once driver sends this down");
- }
-
- Value *vCurIndices;
- Value *startOffset;
- if(ied.InstanceEnable)
- {
- Value* stepRate = C(ied.InstanceAdvancementState);
-
- // prevent a div by 0 for 0 step rate
- Value* isNonZeroStep = ICMP_UGT(stepRate, C(0));
- stepRate = SELECT(isNonZeroStep, stepRate, C(1));
-
- // calc the current offset into instanced data buffer
- Value* calcInstance = UDIV(curInstance, stepRate);
-
- // if step rate is 0, every instance gets instance 0
- calcInstance = SELECT(isNonZeroStep, calcInstance, C(0));
-
- vCurIndices = VBROADCAST(calcInstance);
-
- startOffset = startInstance;
- }
- else if (ied.InstanceStrideEnable)
- {
- // silence unused variable warnings
- startOffset = C(0);
- vCurIndices = vIndices;
-
- SWR_ASSERT((0), "TODO: Fill out more once driver sends this down.");
- }
- else
- {
- // offset indices by baseVertex
- vCurIndices = ADD(vIndices, vBaseVertex);
-
- startOffset = startVertex;
- }
-
- // load SWR_VERTEX_BUFFER_STATE::pData
- Value *stream = LOAD(streams, {ied.StreamIndex, SWR_VERTEX_BUFFER_STATE_xpData});
-
- // load SWR_VERTEX_BUFFER_STATE::pitch
- Value *stride = LOAD(streams, {ied.StreamIndex, SWR_VERTEX_BUFFER_STATE_pitch});
- stride = Z_EXT(stride, mInt64Ty);
-
- // load SWR_VERTEX_BUFFER_STATE::size
- Value *size = LOAD(streams, {ied.StreamIndex, SWR_VERTEX_BUFFER_STATE_size});
- size = Z_EXT(size, mInt64Ty);
-
- Value* startVertexOffset = MUL(Z_EXT(startOffset, mInt64Ty), stride);
-
- Value *minVertex = NULL;
- Value *minVertexOffset = NULL;
- if (fetchState.bPartialVertexBuffer) {
- // fetch min index for low bounds checking
- minVertex = GEP(streams, {C(ied.StreamIndex), C(SWR_VERTEX_BUFFER_STATE_minVertex)});
- minVertex = LOAD(minVertex);
- if (!fetchState.bDisableIndexOOBCheck) {
- minVertexOffset = MUL(Z_EXT(minVertex, mInt64Ty), stride);
- }
- }
-
- // Load from the stream.
- for(uint32_t lane = 0; lane < mVWidth; ++lane)
- {
- // Get index
- Value* index = VEXTRACT(vCurIndices, C(lane));
-
- if (fetchState.bPartialVertexBuffer) {
- // clamp below minvertex
- Value *isBelowMin = ICMP_SLT(index, minVertex);
- index = SELECT(isBelowMin, minVertex, index);
- }
-
- index = Z_EXT(index, mInt64Ty);
-
- Value* offset = MUL(index, stride);
- offset = ADD(offset, C((int64_t)ied.AlignedByteOffset));
- offset = ADD(offset, startVertexOffset);
-
- if (!fetchState.bDisableIndexOOBCheck) {
- // check for out of bound access, including partial OOB, and replace them with minVertex
- Value *endOffset = ADD(offset, C((int64_t)info.Bpp));
- Value *oob = ICMP_ULE(endOffset, size);
- if (fetchState.bPartialVertexBuffer) {
- offset = SELECT(oob, offset, minVertexOffset);
- } else {
- offset = SELECT(oob, offset, ConstantInt::get(mInt64Ty, 0));
- }
- }
-
- Value* pointer = GEP(stream, offset);
- // We use a full-lane, but don't actually care.
- Value* vptr = 0;
-
- // get a pointer to a 4 component attrib in default address space
- switch(bpc)
- {
- case 8: vptr = BITCAST(pointer, PointerType::get(VectorType::get(mInt8Ty, 4), 0)); break;
- case 16: vptr = BITCAST(pointer, PointerType::get(VectorType::get(mInt16Ty, 4), 0)); break;
- case 32: vptr = BITCAST(pointer, PointerType::get(VectorType::get(mFP32Ty, 4), 0)); break;
- default: SWR_INVALID("Unsupported underlying bpp!");
- }
-
- // load 4 components of attribute
- Value* vec = ALIGNED_LOAD(vptr, 1, false);
-
- // Convert To FP32 internally
- switch(info.type[0])
- {
- case SWR_TYPE_UNORM:
- switch(bpc)
- {
- case 8:
- vec = UI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
- vec = FMUL(vec, ConstantVector::get(std::vector<Constant*>(4, ConstantFP::get(mFP32Ty, 1.0 / 255.0))));
- break;
- case 16:
- vec = UI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
- vec = FMUL(vec, ConstantVector::get(std::vector<Constant*>(4, ConstantFP::get(mFP32Ty, 1.0 / 65535.0))));
- break;
- default:
- SWR_INVALID("Unsupported underlying type!");
- break;
- }
- break;
- case SWR_TYPE_SNORM:
- switch(bpc)
- {
- case 8:
- vec = SI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
- vec = FMUL(vec, ConstantVector::get(std::vector<Constant*>(4, ConstantFP::get(mFP32Ty, 1.0 / 128.0))));
- break;
- case 16:
- vec = SI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
- vec = FMUL(vec, ConstantVector::get(std::vector<Constant*>(4, ConstantFP::get(mFP32Ty, 1.0 / 32768.0))));
- break;
- default:
- SWR_INVALID("Unsupported underlying type!");
- break;
- }
- break;
- case SWR_TYPE_UINT:
- // Zero extend uint32_t types.
- switch(bpc)
- {
- case 8:
- case 16:
- vec = Z_EXT(vec, VectorType::get(mInt32Ty, 4));
- vec = BITCAST(vec, VectorType::get(mFP32Ty, 4));
- break;
- case 32:
- break; // Pass through unchanged.
- default:
- SWR_INVALID("Unsupported underlying type!");
- break;
- }
- break;
- case SWR_TYPE_SINT:
- // Sign extend SINT types.
- switch(bpc)
- {
- case 8:
- case 16:
- vec = S_EXT(vec, VectorType::get(mInt32Ty, 4));
- vec = BITCAST(vec, VectorType::get(mFP32Ty, 4));
- break;
- case 32:
- break; // Pass through unchanged.
- default:
- SWR_INVALID("Unsupported underlying type!");
- break;
- }
- break;
- case SWR_TYPE_FLOAT:
- switch(bpc)
- {
- case 32:
- break; // Pass through unchanged.
- default:
- SWR_INVALID("Unsupported underlying type!");
- }
- break;
- case SWR_TYPE_USCALED:
- vec = UI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
- break;
- case SWR_TYPE_SSCALED:
- vec = SI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
- break;
- case SWR_TYPE_SFIXED:
- vec = FMUL(SI_TO_FP(vec, VectorType::get(mFP32Ty, 4)), VBROADCAST(C(1/65536.0f)));
- break;
- case SWR_TYPE_UNKNOWN:
- case SWR_TYPE_UNUSED:
- SWR_INVALID("Unsupported type %d!", info.type[0]);
- }
-
- // promote mask: sse(0,1,2,3) | avx(0,1,2,3,4,4,4,4)
- // uwvec: 4 x F32, undef value
- Value* wvec = VSHUFFLE(vec, uwvec, promoteMask);
- vectors.push_back(wvec);
- }
-
- std::vector<Constant*> v01Mask(mVWidth);
- std::vector<Constant*> v23Mask(mVWidth);
- std::vector<Constant*> v02Mask(mVWidth);
- std::vector<Constant*> v13Mask(mVWidth);
-
- // Concatenate the vectors together.
- elements[0] = VUNDEF_F();
- elements[1] = VUNDEF_F();
- elements[2] = VUNDEF_F();
- elements[3] = VUNDEF_F();
- for(uint32_t b = 0, num4Wide = mVWidth / 4; b < num4Wide; ++b)
- {
- v01Mask[4 * b + 0] = C(0 + 4 * b);
- v01Mask[4 * b + 1] = C(1 + 4 * b);
- v01Mask[4 * b + 2] = C(0 + 4 * b + mVWidth);
- v01Mask[4 * b + 3] = C(1 + 4 * b + mVWidth);
-
- v23Mask[4 * b + 0] = C(2 + 4 * b);
- v23Mask[4 * b + 1] = C(3 + 4 * b);
- v23Mask[4 * b + 2] = C(2 + 4 * b + mVWidth);
- v23Mask[4 * b + 3] = C(3 + 4 * b + mVWidth);
-
- v02Mask[4 * b + 0] = C(0 + 4 * b);
- v02Mask[4 * b + 1] = C(2 + 4 * b);
- v02Mask[4 * b + 2] = C(0 + 4 * b + mVWidth);
- v02Mask[4 * b + 3] = C(2 + 4 * b + mVWidth);
-
- v13Mask[4 * b + 0] = C(1 + 4 * b);
- v13Mask[4 * b + 1] = C(3 + 4 * b);
- v13Mask[4 * b + 2] = C(1 + 4 * b + mVWidth);
- v13Mask[4 * b + 3] = C(3 + 4 * b + mVWidth);
-
- std::vector<Constant*> iMask(mVWidth);
- for(uint32_t i = 0; i < mVWidth; ++i)
- {
- if(((4 * b) <= i) && (i < (4 * (b + 1))))
- {
- iMask[i] = C(i % 4 + mVWidth);
- }
- else
- {
- iMask[i] = C(i);
- }
- }
- Constant* insertMask = ConstantVector::get(iMask);
- elements[0] = VSHUFFLE(elements[0], vectors[4 * b + 0], insertMask);
- elements[1] = VSHUFFLE(elements[1], vectors[4 * b + 1], insertMask);
- elements[2] = VSHUFFLE(elements[2], vectors[4 * b + 2], insertMask);
- elements[3] = VSHUFFLE(elements[3], vectors[4 * b + 3], insertMask);
- }
-
- Value* x0y0x1y1 = VSHUFFLE(elements[0], elements[1], ConstantVector::get(v01Mask));
- Value* x2y2x3y3 = VSHUFFLE(elements[2], elements[3], ConstantVector::get(v01Mask));
- Value* z0w0z1w1 = VSHUFFLE(elements[0], elements[1], ConstantVector::get(v23Mask));
- Value* z2w3z2w3 = VSHUFFLE(elements[2], elements[3], ConstantVector::get(v23Mask));
- elements[0] = VSHUFFLE(x0y0x1y1, x2y2x3y3, ConstantVector::get(v02Mask));
- elements[1] = VSHUFFLE(x0y0x1y1, x2y2x3y3, ConstantVector::get(v13Mask));
- elements[2] = VSHUFFLE(z0w0z1w1, z2w3z2w3, ConstantVector::get(v02Mask));
- elements[3] = VSHUFFLE(z0w0z1w1, z2w3z2w3, ConstantVector::get(v13Mask));
-
- switch(numComponents + 1)
- {
- case 1: elements[0] = VIMMED1(0.0f);
- case 2: elements[1] = VIMMED1(0.0f);
- case 3: elements[2] = VIMMED1(0.0f);
- case 4: elements[3] = VIMMED1(1.0f);
- }
-
- for(uint32_t c = 0; c < 4; ++c)
- {
+ // Revert 16-wide override
#if USE_SIMD16_SHADERS
- Value* dest = GEP(pVtxOut, C(nelt * 8 + c * 2), "destGEP");
-#else
- Value* dest = GEP(pVtxOut, C(nelt * 4 + c), "destGEP");
+ SetTargetWidth(baseWidth);
#endif
- STORE(elements[c], dest);
- }
- }
+
+ return fetch;
}
// returns true for odd formats that require special state.gather handling
/// @param streams - value pointer to the current vertex stream
/// @param vIndices - vector value of indices to gather
/// @param pVtxOut - value pointer to output simdvertex struct
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
-void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
- Value *streams, Value *vIndices, Value *vIndices2, Value *pVtxOut, bool useVertexID2)
-#else
-void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
- Value* streams, Value* vIndices, Value* pVtxOut, bool useVertexID2)
-#endif
-#else
void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
Value* streams, Value* vIndices, Value* pVtxOut)
-#endif
{
uint32_t currentVertexElement = 0;
uint32_t outputElt = 0;
Value* vVertexElements[4];
-#if USE_SIMD16_GATHERS
- Value *pVtxSrc2[4];
-#endif
Value* startVertex = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_StartVertex});
Value* startInstance = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_StartInstance});
Value* curInstance = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_CurInstance});
-#if USE_SIMD16_GATHERS
- Value* vBaseVertex16 = VBROADCAST_16(LOAD(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_BaseVertex }));
-#else
Value* vBaseVertex = VBROADCAST(LOAD(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_BaseVertex }));
-#endif
curInstance->setName("curInstance");
for (uint32_t nInputElt = 0; nInputElt < fetchState.numAttribs; nInputElt += 1)
Value *pStreamBase = INT_TO_PTR(stream, PointerType::get(mInt8Ty, 0));
Value *stride = LOAD(streams, {ied.StreamIndex, SWR_VERTEX_BUFFER_STATE_pitch});
-#if USE_SIMD16_GATHERS
- Value *vStride16 = VBROADCAST_16(stride);
-#else
Value *vStride = VBROADCAST(stride);
-#endif
// max vertex index that is fully in bounds
Value *maxVertex = GEP(streams, {C(ied.StreamIndex), C(SWR_VERTEX_BUFFER_STATE_maxVertex)});
curInstance = ADD(curInstance, startInstance);
}
-#if USE_SIMD16_GATHERS
- Value *vCurIndices16;
-#else
Value *vCurIndices;
-#endif
Value *startOffset;
-#if USE_SIMD16_GATHERS
- Value *vInstanceStride16 = VIMMED1_16(0);
-#else
Value *vInstanceStride = VIMMED1(0);
-#endif
if (ied.InstanceEnable)
{
// if step rate is 0, every instance gets instance 0
calcInstance = SELECT(isNonZeroStep, calcInstance, C(0));
-#if USE_SIMD16_GATHERS
- vCurIndices16 = VBROADCAST_16(calcInstance);
-#else
vCurIndices = VBROADCAST(calcInstance);
-#endif
-
startOffset = startInstance;
}
else if (ied.InstanceStrideEnable)
{
// grab the instance advancement state, determines stride in bytes from one instance to the next
Value* stepRate = C(ied.InstanceAdvancementState);
-#if USE_SIMD16_GATHERS
- vInstanceStride16 = VBROADCAST_16(MUL(curInstance, stepRate));
-#else
vInstanceStride = VBROADCAST(MUL(curInstance, stepRate));
-#endif
// offset indices by baseVertex
-#if USE_SIMD16_GATHERS
- Value *vIndices16 = JOIN_16(vIndices, vIndices2);
-
- vCurIndices16 = ADD(vIndices16, vBaseVertex16);
-#else
vCurIndices = ADD(vIndices, vBaseVertex);
-#endif
startOffset = startVertex;
SWR_ASSERT((0), "TODO: Fill out more once driver sends this down.");
else
{
// offset indices by baseVertex
-#if USE_SIMD16_GATHERS
- Value *vIndices16 = JOIN_16(vIndices, vIndices2);
-
- vCurIndices16 = ADD(vIndices16, vBaseVertex16);
-#else
vCurIndices = ADD(vIndices, vBaseVertex);
-#endif
-
startOffset = startVertex;
}
// Load the in bounds size of a partially valid vertex
Value *partialInboundsSize = GEP(streams, {C(ied.StreamIndex), C(SWR_VERTEX_BUFFER_STATE_partialInboundsSize)});
partialInboundsSize = LOAD(partialInboundsSize);
-#if USE_SIMD16_GATHERS
- Value *vPartialVertexSize = VBROADCAST_16(partialInboundsSize);
- Value *vBpp = VBROADCAST_16(C(info.Bpp));
- Value *vAlignmentOffsets = VBROADCAST_16(C(ied.AlignedByteOffset));
-#else
Value *vPartialVertexSize = VBROADCAST(partialInboundsSize);
Value *vBpp = VBROADCAST(C(info.Bpp));
Value *vAlignmentOffsets = VBROADCAST(C(ied.AlignedByteOffset));
-#endif
// is the element <= the partially valid size
Value *vElementInBoundsMask = ICMP_SLE(vBpp, SUB(vPartialVertexSize, vAlignmentOffsets));
-#if USE_SIMD16_GATHERS
- // override cur indices with 0 if pitch is 0
- Value *pZeroPitchMask16 = ICMP_EQ(vStride16, VIMMED1_16(0));
- vCurIndices16 = SELECT(pZeroPitchMask16, VIMMED1_16(0), vCurIndices16);
-
- // are vertices partially OOB?
- Value *vMaxVertex16 = VBROADCAST_16(maxVertex);
- Value *vPartialOOBMask = ICMP_EQ(vCurIndices16, vMaxVertex16);
-
- // are vertices fully in bounds?
- Value *vMaxGatherMask16 = ICMP_ULT(vCurIndices16, vMaxVertex16);
-
- Value *vGatherMask16;
-
- if (fetchState.bPartialVertexBuffer)
- {
- // are vertices below minVertex limit?
- Value *vMinVertex16 = VBROADCAST_16(minVertex);
- Value *vMinGatherMask16 = ICMP_UGE(vCurIndices16, vMinVertex16);
-
- // only fetch lanes that pass both tests
- vGatherMask16 = AND(vMaxGatherMask16, vMinGatherMask16);
- }
- else
- {
- vGatherMask16 = vMaxGatherMask16;
- }
-
- // blend in any partially OOB indices that have valid elements
- vGatherMask16 = SELECT(vPartialOOBMask, vElementInBoundsMask, vGatherMask16);
-
- // calculate the actual offsets into the VB
- Value *vOffsets16 = MUL(vCurIndices16, vStride16);
- vOffsets16 = ADD(vOffsets16, vAlignmentOffsets);
-
- // if instance stride enable is:
- // true - add product of the instanceID and advancement state to the offst into the VB
- // false - value of vInstanceStride has been initialialized to zero
- vOffsets16 = ADD(vOffsets16, vInstanceStride16);
-
- // TODO: remove the following simd8 interop stuff once all code paths are fully widened to SIMD16..
-
- Value *vGatherMask = EXTRACT_16(vGatherMask16, 0);
- Value *vGatherMask2 = EXTRACT_16(vGatherMask16, 1);
-
- Value *vOffsets = EXTRACT_16(vOffsets16, 0);
- Value *vOffsets2 = EXTRACT_16(vOffsets16, 1);
-#else
// override cur indices with 0 if pitch is 0
Value* pZeroPitchMask = ICMP_EQ(vStride, VIMMED1(0));
vCurIndices = SELECT(pZeroPitchMask, VIMMED1(0), vCurIndices);
// false - value of vInstanceStride has been initialized to zero
vOffsets = ADD(vOffsets, vInstanceStride);
-#endif
// Packing and component control
ComponentEnable compMask = (ComponentEnable)ied.ComponentPacking;
const ComponentControl compCtrl[4] { (ComponentControl)ied.ComponentControl0, (ComponentControl)ied.ComponentControl1,
(ComponentControl)ied.ComponentControl2, (ComponentControl)ied.ComponentControl3};
- // Special gather/conversion for formats without equal component sizes
- if (IsOddFormat((SWR_FORMAT)ied.Format))
- {
-#if USE_SIMD16_GATHERS
- Value *pResults[4];
- Value *pResults2[4];
- CreateGatherOddFormats((SWR_FORMAT)ied.Format, vGatherMask, pStreamBase, vOffsets, pResults);
- CreateGatherOddFormats((SWR_FORMAT)ied.Format, vGatherMask2, pStreamBase, vOffsets2, pResults2);
- ConvertFormat((SWR_FORMAT)ied.Format, pResults);
- ConvertFormat((SWR_FORMAT)ied.Format, pResults2);
-
- for (uint32_t c = 0; c < 4; c += 1)
- {
- if (isComponentEnabled(compMask, c))
- {
- // pack adjacent pairs of SIMD8s into SIMD16s
- pVtxSrc2[currentVertexElement++] = JOIN_16(pResults[c], pResults2[c]);
-
- if (currentVertexElement > 3)
- {
- // store SIMD16s
- Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
- StoreVertexElements16(pVtxOut2, outputElt++, 4, pVtxSrc2);
- // reset to the next vVertexElement to output
- currentVertexElement = 0;
- }
- }
- }
-#else
- Value *pResults[4];
- CreateGatherOddFormats((SWR_FORMAT)ied.Format, vGatherMask, pStreamBase, vOffsets, pResults);
- ConvertFormat((SWR_FORMAT)ied.Format, pResults);
-
- for (uint32_t c = 0; c < 4; c += 1)
- {
- if (isComponentEnabled(compMask, c))
- {
- vVertexElements[currentVertexElement++] = pResults[c];
- if (currentVertexElement > 3)
- {
- StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
- // reset to the next vVertexElement to output
- currentVertexElement = 0;
- }
- }
- }
-#endif
- }
- else if(info.type[0] == SWR_TYPE_FLOAT)
- {
- ///@todo: support 64 bit vb accesses
- Value *gatherSrc = VIMMED1(0.0f);
-#if USE_SIMD16_GATHERS
- Value *gatherSrc16 = VIMMED1_16(0.0f);
-#endif
-
- SWR_ASSERT(IsUniformFormat((SWR_FORMAT)ied.Format),
- "Unsupported format for standard gather fetch.");
-
- // Gather components from memory to store in a simdvertex structure
- switch (bpc)
- {
- case 16:
- {
-#if USE_SIMD16_GATHERS
- Value *gatherResult[2];
-
- // if we have at least one component out of x or y to fetch
- if (isComponentEnabled(compMask, 0) || isComponentEnabled(compMask, 1))
- {
- gatherResult[0] = GATHERPS_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
- // e.g. result of first 8x32bit integer gather for 16bit components
- // 256i - 0 1 2 3 4 5 6 7
- // xyxy xyxy xyxy xyxy xyxy xyxy xyxy xyxy
- //
- }
- else
- {
- gatherResult[0] = VUNDEF_I_16();
- }
-
- // if we have at least one component out of z or w to fetch
- if (isComponentEnabled(compMask, 2) || isComponentEnabled(compMask, 3))
- {
- // offset base to the next components(zw) in the vertex to gather
- pStreamBase = GEP(pStreamBase, C((char)4));
-
- gatherResult[1] = GATHERPS_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
- // e.g. result of second 8x32bit integer gather for 16bit components
- // 256i - 0 1 2 3 4 5 6 7
- // zwzw zwzw zwzw zwzw zwzw zwzw zwzw zwzw
- //
- }
- else
- {
- gatherResult[1] = VUNDEF_I_16();
- }
+ // Special gather/conversion for formats without equal component sizes
+ if (IsOddFormat((SWR_FORMAT)ied.Format))
+ {
+ Value *pResults[4];
+ CreateGatherOddFormats((SWR_FORMAT)ied.Format, vGatherMask, pStreamBase, vOffsets, pResults);
+ ConvertFormat((SWR_FORMAT)ied.Format, pResults);
- // if we have at least one component to shuffle into place
- if (compMask)
+ for (uint32_t c = 0; c < 4; c += 1)
+ {
+ if (isComponentEnabled(compMask, c))
+ {
+ vVertexElements[currentVertexElement++] = pResults[c];
+ if (currentVertexElement > 3)
{
- Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
+ StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
+ // reset to the next vVertexElement to output
+ currentVertexElement = 0;
+ }
+ }
+ }
+ }
+ else if(info.type[0] == SWR_TYPE_FLOAT)
+ {
+ ///@todo: support 64 bit vb accesses
+ Value *gatherSrc = VIMMED1(0.0f);
- Shuffle16bpcArgs args = std::forward_as_tuple(gatherResult, pVtxOut2, Instruction::CastOps::FPExt, CONVERT_NONE,
- currentVertexElement, outputElt, compMask, compCtrl, pVtxSrc2);
+ SWR_ASSERT(IsUniformFormat((SWR_FORMAT)ied.Format),
+ "Unsupported format for standard gather fetch.");
- // Shuffle gathered components into place in simdvertex struct
- Shuffle16bpcGather16(args); // outputs to vVertexElements ref
- }
-#else
+ // Gather components from memory to store in a simdvertex structure
+ switch (bpc)
+ {
+ case 16:
+ {
Value *vGatherResult[2];
// if we have at least one component out of x or y to fetch
currentVertexElement, outputElt, compMask, compCtrl, vVertexElements);
// Shuffle gathered components into place in simdvertex struct
-#if USE_SIMD16_SHADERS
- Shuffle16bpcGather(args, useVertexID2); // outputs to vVertexElements ref
-#else
- Shuffle16bpcGather(args); // outputs to vVertexElements ref
-#endif
+ mVWidth == 16 ? Shuffle16bpcGather16(args) : Shuffle16bpcGather(args); // outputs to vVertexElements ref
}
-#endif
}
break;
case 32:
{
for (uint32_t i = 0; i < 4; i += 1)
{
-#if USE_SIMD16_GATHERS
- if (isComponentEnabled(compMask, i))
- {
- // if we need to gather the component
- if (compCtrl[i] == StoreSrc)
- {
- // Gather a SIMD of vertices
- // APIs allow a 4GB range for offsets
- // However, GATHERPS uses signed 32-bit offsets, so only a 2GB range :(
- // But, we know that elements must be aligned for FETCH. :)
- // Right shift the offset by a bit and then scale by 2 to remove the sign extension.
- Value *shiftedOffsets16 = LSHR(vOffsets16, 1);
- pVtxSrc2[currentVertexElement++] = GATHERPS_16(gatherSrc16, pStreamBaseGFX, shiftedOffsets16, vGatherMask16, 2, GFX_MEM_CLIENT_FETCH);
- }
- else
- {
- pVtxSrc2[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
- }
-
- if (currentVertexElement > 3)
- {
- // store SIMD16s
- Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
- StoreVertexElements16(pVtxOut2, outputElt++, 4, pVtxSrc2);
- // reset to the next vVertexElement to output
- currentVertexElement = 0;
- }
- }
-#else
if (isComponentEnabled(compMask, i))
{
// if we need to gather the component
}
else
{
-#if USE_SIMD16_SHADERS
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
}
if (currentVertexElement > 3)
currentVertexElement = 0;
}
}
-#endif
// offset base to the next component in the vertex to gather
pStreamBase = GEP(pStreamBase, C((char)4));
{
for (uint32_t i = 0; i < 4; i += 1)
{
-#if USE_SIMD16_GATHERS
- if (isComponentEnabled(compMask, i))
- {
- // if we need to gather the component
- if (compCtrl[i] == StoreSrc)
- {
- Value *vMaskLo = VSHUFFLE(vGatherMask, VUNDEF(mInt1Ty, 8), C({ 0, 1, 2, 3 }));
- Value *vMaskLo2 = VSHUFFLE(vGatherMask2, VUNDEF(mInt1Ty, 8), C({ 0, 1, 2, 3 }));
- Value *vMaskHi = VSHUFFLE(vGatherMask, VUNDEF(mInt1Ty, 8), C({ 4, 5, 6, 7 }));
- Value *vMaskHi2 = VSHUFFLE(vGatherMask2, VUNDEF(mInt1Ty, 8), C({ 4, 5, 6, 7 }));
-
- Value *vOffsetsLo = VEXTRACTI128(vOffsets, C(0));
- Value *vOffsetsLo2 = VEXTRACTI128(vOffsets2, C(0));
- Value *vOffsetsHi = VEXTRACTI128(vOffsets, C(1));
- Value *vOffsetsHi2 = VEXTRACTI128(vOffsets2, C(1));
-
- Value *vZeroDouble = VECTOR_SPLAT(4, ConstantFP::get(IRB()->getDoubleTy(), 0.0f));
-
- Value* pGatherLo = GATHERPD(vZeroDouble, pStreamBase, vOffsetsLo, vMaskLo);
- Value* pGatherLo2 = GATHERPD(vZeroDouble, pStreamBase, vOffsetsLo2, vMaskLo2);
- Value* pGatherHi = GATHERPD(vZeroDouble, pStreamBase, vOffsetsHi, vMaskHi);
- Value* pGatherHi2 = GATHERPD(vZeroDouble, pStreamBase, vOffsetsHi2, vMaskHi2);
-
- pGatherLo = VCVTPD2PS(pGatherLo);
- pGatherLo2 = VCVTPD2PS(pGatherLo2);
- pGatherHi = VCVTPD2PS(pGatherHi);
- pGatherHi2 = VCVTPD2PS(pGatherHi2);
-
- Value *pGather = VSHUFFLE(pGatherLo, pGatherHi, C({ 0, 1, 2, 3, 4, 5, 6, 7 }));
- Value *pGather2 = VSHUFFLE(pGatherLo2, pGatherHi2, C({ 0, 1, 2, 3, 4, 5, 6, 7 }));
-
- // pack adjacent pairs of SIMD8s into SIMD16s
- pVtxSrc2[currentVertexElement++] = JOIN_16(pGather, pGather2);
- }
- else
- {
- pVtxSrc2[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
- }
-
- if (currentVertexElement > 3)
- {
- // store SIMD16s
- Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
- StoreVertexElements16(pVtxOut2, outputElt++, 4, pVtxSrc2);
- // reset to the next vVertexElement to output
- currentVertexElement = 0;
- }
- }
-
- // offset base to the next component in the vertex to gather
- pStreamBase = GEP(pStreamBase, C((char)8));
-#else
if (isComponentEnabled(compMask, i))
{
// if we need to gather the component
}
else
{
-#if USE_SIMD16_SHADERS
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
}
if (currentVertexElement > 3)
// offset base to the next component in the vertex to gather
pStreamBase = GEP(pStreamBase, C((char)8));
-#endif
}
}
break;
// value substituted when component of gather is masked
Value* gatherSrc = VIMMED1(0);
-#if USE_SIMD16_GATHERS
- Value *gatherSrc16 = VIMMED1_16(0);
-#endif
// Gather components from memory to store in a simdvertex structure
switch (bpc)
// if we have at least one component to fetch
if (compMask)
{
-#if USE_SIMD16_GATHERS
- Value *gatherResult = GATHERDD_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
- // e.g. result of an 8x32bit integer gather for 8bit components
- // 256i - 0 1 2 3 4 5 6 7
- // xyzw xyzw xyzw xyzw xyzw xyzw xyzw xyzw
-
- Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
- Shuffle8bpcArgs args = std::forward_as_tuple(gatherResult, pVtxOut2, extendCastType, conversionType,
- currentVertexElement, outputElt, compMask, compCtrl, pVtxSrc2, info.swizzle);
-
- // Shuffle gathered components into place in simdvertex struct
- Shuffle8bpcGatherd16(args); // outputs to vVertexElements ref
-#else
Value *vGatherResult = GATHERDD(gatherSrc, pStreamBase, vOffsets, vGatherMask);
// e.g. result of an 8x32bit integer gather for 8bit components
// 256i - 0 1 2 3 4 5 6 7
currentVertexElement, outputElt, compMask, compCtrl, vVertexElements, info.swizzle);
// Shuffle gathered components into place in simdvertex struct
-#if USE_SIMD16_SHADERS
- Shuffle8bpcGatherd(args, useVertexID2); // outputs to vVertexElements ref
-#else
- Shuffle8bpcGatherd(args); // outputs to vVertexElements ref
-#endif
-#endif
+ mVWidth == 16 ? Shuffle8bpcGatherd16(args) : Shuffle8bpcGatherd(args); // outputs to vVertexElements ref
}
}
break;
case 16:
{
-#if USE_SIMD16_GATHERS
- Value *gatherResult[2];
-
- // if we have at least one component out of x or y to fetch
- if (isComponentEnabled(compMask, 0) || isComponentEnabled(compMask, 1))
- {
- gatherResult[0] = GATHERDD_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
- // e.g. result of first 8x32bit integer gather for 16bit components
- // 256i - 0 1 2 3 4 5 6 7
- // xyxy xyxy xyxy xyxy xyxy xyxy xyxy xyxy
- //
- }
- else
- {
- gatherResult[0] = VUNDEF_I_16();
- }
-
- // if we have at least one component out of z or w to fetch
- if (isComponentEnabled(compMask, 2) || isComponentEnabled(compMask, 3))
- {
- // offset base to the next components(zw) in the vertex to gather
- pStreamBase = GEP(pStreamBase, C((char)4));
-
- gatherResult[1] = GATHERDD_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
- // e.g. result of second 8x32bit integer gather for 16bit components
- // 256i - 0 1 2 3 4 5 6 7
- // zwzw zwzw zwzw zwzw zwzw zwzw zwzw zwzw
- //
- }
- else
- {
- gatherResult[1] = VUNDEF_I_16();
- }
-
- // if we have at least one component to shuffle into place
- if (compMask)
- {
- Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
- Shuffle16bpcArgs args = std::forward_as_tuple(gatherResult, pVtxOut2, extendCastType, conversionType,
- currentVertexElement, outputElt, compMask, compCtrl, pVtxSrc2);
-
- // Shuffle gathered components into place in simdvertex struct
- Shuffle16bpcGather16(args); // outputs to vVertexElements ref
- }
-#else
Value *vGatherResult[2];
// if we have at least one component out of x or y to fetch
currentVertexElement, outputElt, compMask, compCtrl, vVertexElements);
// Shuffle gathered components into place in simdvertex struct
-#if USE_SIMD16_SHADERS
- Shuffle16bpcGather(args, useVertexID2); // outputs to vVertexElements ref
-#else
- Shuffle16bpcGather(args); // outputs to vVertexElements ref
-#endif
+ mVWidth == 16 ? Shuffle16bpcGather16(args) : Shuffle16bpcGather(args); // outputs to vVertexElements ref
}
-#endif
}
break;
case 32:
// if we need to gather the component
if (compCtrl[i] == StoreSrc)
{
-#if USE_SIMD16_GATHERS
- Value *pGather = GATHERDD_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
- if (conversionType == CONVERT_USCALED)
- {
- pGather = UI_TO_FP(pGather, mSimd16FP32Ty);
- }
- else if (conversionType == CONVERT_SSCALED)
- {
- pGather = SI_TO_FP(pGather, mSimd16FP32Ty);
- }
- else if (conversionType == CONVERT_SFIXED)
- {
- pGather = FMUL(SI_TO_FP(pGather, mSimd16FP32Ty), VBROADCAST_16(C(1 / 65536.0f)));
- }
-
- pVtxSrc2[currentVertexElement++] = pGather;
-
- // e.g. result of a single 8x32bit integer gather for 32bit components
- // 256i - 0 1 2 3 4 5 6 7
- // xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
-#else
Value* pGather = GATHERDD(gatherSrc, pStreamBase, vOffsets, vGatherMask);
if (conversionType == CONVERT_USCALED)
// e.g. result of a single 8x32bit integer gather for 32bit components
// 256i - 0 1 2 3 4 5 6 7
// xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
-#endif
}
else
{
-#if USE_SIMD16_GATHERS
- pVtxSrc2[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
-#else
-#if USE_SIMD16_SHADERS
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
-#endif
}
if (currentVertexElement > 3)
{
-#if USE_SIMD16_GATHERS
- // store SIMD16s
- Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
- StoreVertexElements16(pVtxOut2, outputElt++, 4, pVtxSrc2);
-#else
StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
-#endif
// reset to the next vVertexElement to output
currentVertexElement = 0;
// if we have a partially filled vVertexElement struct, output it
if (currentVertexElement > 0)
{
-#if USE_SIMD16_GATHERS
- // store SIMD16s
- Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
- StoreVertexElements16(pVtxOut2, outputElt++, currentVertexElement, pVtxSrc2);
-#else
StoreVertexElements(pVtxOut, outputElt++, currentVertexElement, vVertexElements);
-#endif
}
}
numIndicesLeft = SDIV(numIndicesLeft, C(4));
// create a vector of index counts from the base index ptr passed into the fetch
- const std::vector<Constant*> vecIndices {C(0), C(1), C(2), C(3), C(4), C(5), C(6), C(7)};
- Constant* vIndexOffsets = ConstantVector::get(vecIndices);
+ Constant* vIndexOffsets;
+ if (mVWidth == 8)
+ {
+ vIndexOffsets = C({ 0, 1, 2, 3, 4, 5, 6, 7 });
+ }
+ else
+ {
+ vIndexOffsets = C({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 });
+ }
// compare index count to the max valid index
// e.g. vMaxIndex 4 4 4 4 4 4 4 4 : 4 indices left to load
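// Illustrative sketch, not taken from the SWR sources: scalar model of the partial-index load the
// code above sets up. Lanes whose offset is below the number of indices remaining are loaded; the
// rest are masked off (read as 0), so the final, partially filled SIMD of indices never reads past
// the end of the index buffer. The helper name and the 16-entry destination are assumptions.
static void LoadValidIndices(const uint32_t* pIndices, int numIndicesLeft,
                             uint32_t (&vIndices)[16], int simdWidth /* 8 or 16 */)
{
    for (int lane = 0; lane < simdWidth; ++lane)
    {
        // vIndexOffsets holds {0, 1, 2, ...}; compare each lane offset against the remaining count
        vIndices[lane] = (lane < numIndicesLeft) ? pIndices[lane] : 0;
    }
}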
/// @param compCtrl - component control val
/// @param vVertexElements[4] - vertex components to output
/// @param swizzle[4] - component swizzle location
-#if USE_SIMD16_GATHERS
void FetchJit::Shuffle8bpcGatherd16(Shuffle8bpcArgs &args)
{
// Unpack tuple args
const uint32_t(&swizzle)[4] = std::get<9>(args);
// cast types
- Type *vGatherTy = mSimdInt32Ty;
- Type *v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
+ Type *vGatherTy = VectorType::get(mInt32Ty, 8);
+ Type *v32x8Ty = VectorType::get(mInt8Ty, 32);
// have to do extra work for sign extending
if ((extendType == Instruction::CastOps::SExt) || (extendType == Instruction::CastOps::SIToFP))
{
- Type *v16x8Ty = VectorType::get(mInt8Ty, mVWidth * 2); // 8x16bit ints in a 128bit lane
- Type *v128Ty = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), mVWidth / 4); // vwidth is units of 32 bits
+ Type *v16x8Ty = VectorType::get(mInt8Ty, 16); // 8x16bit ints in a 128bit lane
+ Type *v128Ty = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), 2);
// shuffle mask, including any swizzling
const char x = (char)swizzle[0]; const char y = (char)swizzle[1];
Value *vi128XY_hi = nullptr;
if (isComponentEnabled(compMask, 0) || isComponentEnabled(compMask, 1))
{
- vi128XY_lo = BITCAST(VPERMD(vShufResult_lo, C<int32_t>({ 0, 4, 0, 0, 1, 5, 0, 0 })), v128Ty);
- vi128XY_hi = BITCAST(VPERMD(vShufResult_hi, C<int32_t>({ 0, 4, 0, 0, 1, 5, 0, 0 })), v128Ty);
+ vi128XY_lo = BITCAST(VSHUFFLE(vShufResult_lo, vShufResult_lo, C<int32_t>({ 0, 4, 0, 0, 1, 5, 0, 0 })), v128Ty);
+ vi128XY_hi = BITCAST(VSHUFFLE(vShufResult_hi, vShufResult_hi, C<int32_t>({ 0, 4, 0, 0, 1, 5, 0, 0 })), v128Ty);
// after PERMD: move and pack xy and zw components in low 64 bits of each 128bit lane
// 256i - 0 1 2 3 4 5 6 7
Value *vi128ZW_hi = nullptr;
if (isComponentEnabled(compMask, 2) || isComponentEnabled(compMask, 3))
{
- vi128ZW_lo = BITCAST(VPERMD(vShufResult_lo, C<int32_t>({ 2, 6, 0, 0, 3, 7, 0, 0 })), v128Ty);
- vi128ZW_hi = BITCAST(VPERMD(vShufResult_hi, C<int32_t>({ 2, 6, 0, 0, 3, 7, 0, 0 })), v128Ty);
+ vi128ZW_lo = BITCAST(VSHUFFLE(vShufResult_lo, vShufResult_lo, C<int32_t>({ 2, 6, 0, 0, 3, 7, 0, 0 })), v128Ty);
+ vi128ZW_hi = BITCAST(VSHUFFLE(vShufResult_hi, vShufResult_hi, C<int32_t>({ 2, 6, 0, 0, 3, 7, 0, 0 })), v128Ty);
}
// init denormalize variables if needed
Value *temp_lo = PMOVSXBD(BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v16x8Ty));
Value *temp_hi = PMOVSXBD(BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v16x8Ty));
+ Value* temp = JOIN_16(temp_lo, temp_hi);
+
// denormalize if needed
if (conversionType != CONVERT_NONE)
{
- temp_lo = FMUL(CAST(fpCast, temp_lo, mSimdFP32Ty), conversionFactor);
- temp_hi = FMUL(CAST(fpCast, temp_hi, mSimdFP32Ty), conversionFactor);
+ temp = FMUL(CAST(fpCast, temp, mSimdFP32Ty), conversionFactor);
}
- vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
+ vVertexElements[currentVertexElement] = temp;
currentVertexElement += 1;
}
else
{
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
+ vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
}
if (currentVertexElement > 3)
{
- StoreVertexElements16(pVtxOut, outputElt++, 4, vVertexElements);
+ StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
// reset to the next vVertexElement to output
currentVertexElement = 0;
}
// 256i - 0 1 2 3 4 5 6 7
// x000 x000 x000 x000 x000 x000 x000 x000
+ Value* temp = JOIN_16(temp_lo, temp_hi);
+
// denormalize if needed
if (conversionType != CONVERT_NONE)
{
- temp_lo = FMUL(CAST(fpCast, temp_lo, mSimdFP32Ty), conversionFactor);
- temp_hi = FMUL(CAST(fpCast, temp_hi, mSimdFP32Ty), conversionFactor);
+ temp = FMUL(CAST(fpCast, temp, mSimdFP32Ty), conversionFactor);
}
- vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
+ vVertexElements[currentVertexElement] = temp;
currentVertexElement += 1;
}
else
{
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
+ vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
}
if (currentVertexElement > 3)
{
- StoreVertexElements16(pVtxOut, outputElt++, 4, vVertexElements);
+ StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
// reset to the next vVertexElement to output
currentVertexElement = 0;
}
}
}
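// Illustrative sketch, not taken from the SWR sources: scalar reference for the "denormalize if
// needed" step in Shuffle8bpcGatherd16 above. Normalized 8-bit components are converted to float
// and scaled by a conversion factor; scaled and unnormalized types skip the multiply. The 1/255
// factor shown is the conventional UNORM8 scale and is an assumption here, not taken from this patch.
static float DenormalizeUnormByte(uint8_t component)
{
    return static_cast<float>(component) * (1.0f / 255.0f);   // maps 0..255 -> 0.0..1.0
}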
-#else
-#if USE_SIMD16_SHADERS
-void FetchJit::Shuffle8bpcGatherd(Shuffle8bpcArgs &args, bool useVertexID2)
-#else
void FetchJit::Shuffle8bpcGatherd(Shuffle8bpcArgs &args)
-#endif
{
// Unpack tuple args
Value*& vGatherResult = std::get<0>(args);
}
else
{
-#if USE_SIMD16_SHADERS
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
}
if (currentVertexElement > 3)
}
}
-#endif
//////////////////////////////////////////////////////////////////////////
/// @brief Takes a SIMD of gathered 16bpc verts, zero or sign extends,
/// denormalizes if needed, converts to F32 if needed, and positions in
/// @param compMask - component packing mask
/// @param compCtrl - component control val
/// @param vVertexElements[4] - vertex components to output
-#if USE_SIMD16_GATHERS
void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
{
// Unpack tuple args
Value* (&vVertexElements)[4] = std::get<8>(args);
// cast types
- Type *vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
- Type *v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
+ Type *vGatherTy = VectorType::get(mInt32Ty, 8);
+ Type *v32x8Ty = VectorType::get(mInt8Ty, 32);
// have to do extra work for sign extending
if ((extendType == Instruction::CastOps::SExt) || (extendType == Instruction::CastOps::SIToFP) || (extendType == Instruction::CastOps::FPExt))
bool bFP = (extendType == Instruction::CastOps::FPExt) ? true : false;
Type *v8x16Ty = VectorType::get(mInt16Ty, 8); // 8x16bit in a 128bit lane
- Type *v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), mVWidth / 4); // vwidth is units of 32 bits
+ Type *v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), 2);
// shuffle mask
- Value *vConstMask = C<char>({ 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15,
- 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 });
+ Value *vConstMask = C<uint8_t>({ 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15,
+ 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 });
Value *vi128XY_lo = nullptr;
Value *vi128XY_hi = nullptr;
if (isComponentEnabled(compMask, 0) || isComponentEnabled(compMask, 1))
{
// SIMD16 PSHUFB isn't part of AVX-512F, so split into SIMD8 for the sake of KNL, for now.
- Value *vGatherResult_lo = EXTRACT_16(vGatherResult[0], 0);
- Value *vGatherResult_hi = EXTRACT_16(vGatherResult[0], 1);
+ Value *vGatherResult_lo = BITCAST(EXTRACT_16(vGatherResult[0], 0), v32x8Ty);
+ Value *vGatherResult_hi = BITCAST(EXTRACT_16(vGatherResult[0], 1), v32x8Ty);
- Value *vShufResult_lo = BITCAST(PSHUFB(BITCAST(vGatherResult_lo, v32x8Ty), vConstMask), vGatherTy);
- Value *vShufResult_hi = BITCAST(PSHUFB(BITCAST(vGatherResult_hi, v32x8Ty), vConstMask), vGatherTy);
+ Value *vShufResult_lo = BITCAST(PSHUFB(vGatherResult_lo, vConstMask), vGatherTy);
+ Value *vShufResult_hi = BITCAST(PSHUFB(vGatherResult_hi, vConstMask), vGatherTy);
// after pshufb: group components together in each 128bit lane
// 256i - 0 1 2 3 4 5 6 7
// xxxx xxxx yyyy yyyy xxxx xxxx yyyy yyyy
- vi128XY_lo = BITCAST(VPERMD(vShufResult_lo, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
- vi128XY_hi = BITCAST(VPERMD(vShufResult_hi, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
+ vi128XY_lo = BITCAST(VSHUFFLE(vShufResult_lo, vShufResult_lo, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
+ vi128XY_hi = BITCAST(VSHUFFLE(vShufResult_hi, vShufResult_hi, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
// after PERMD: move and pack xy components into each 128bit lane
// 256i - 0 1 2 3 4 5 6 7
Value *vi128ZW_hi = nullptr;
if (isComponentEnabled(compMask, 2) || isComponentEnabled(compMask, 3))
{
- Value *vGatherResult_lo = EXTRACT_16(vGatherResult[1], 0);
- Value *vGatherResult_hi = EXTRACT_16(vGatherResult[1], 1);
+ Value *vGatherResult_lo = BITCAST(EXTRACT_16(vGatherResult[1], 0), v32x8Ty);
+ Value *vGatherResult_hi = BITCAST(EXTRACT_16(vGatherResult[1], 1), v32x8Ty);
- Value *vShufResult_lo = BITCAST(PSHUFB(BITCAST(vGatherResult_lo, v32x8Ty), vConstMask), vGatherTy);
- Value *vShufResult_hi = BITCAST(PSHUFB(BITCAST(vGatherResult_hi, v32x8Ty), vConstMask), vGatherTy);
+ Value *vShufResult_lo = BITCAST(PSHUFB(vGatherResult_lo, vConstMask), vGatherTy);
+ Value *vShufResult_hi = BITCAST(PSHUFB(vGatherResult_hi, vConstMask), vGatherTy);
- vi128ZW_lo = BITCAST(VPERMD(vShufResult_lo, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
- vi128ZW_hi = BITCAST(VPERMD(vShufResult_hi, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
+ vi128ZW_lo = BITCAST(VSHUFFLE(vShufResult_lo, vShufResult_lo, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
+ vi128ZW_hi = BITCAST(VSHUFFLE(vShufResult_hi, vShufResult_hi, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
}
// init denormalize variables if needed
if (bFP)
{
// extract 128 bit lanes to sign extend each component
- Value *temp_lo = CVTPH2PS(BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v8x16Ty));
- Value *temp_hi = CVTPH2PS(BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v8x16Ty));
+ /// @todo Force 8-wide cvt until we support generic cvt in x86 lowering pass
+ Function* pCvtPh2Ps = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_vcvtph2ps_256);
+ Value *temp_lo = CALL(pCvtPh2Ps, BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v8x16Ty));
+ Value *temp_hi = CALL(pCvtPh2Ps, BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v8x16Ty));
vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
}
Value *temp_lo = PMOVSXWD(BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v8x16Ty));
Value *temp_hi = PMOVSXWD(BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v8x16Ty));
+ Value* temp = JOIN_16(temp_lo, temp_hi);
+
// denormalize if needed
if (conversionType != CONVERT_NONE)
{
- temp_lo = FMUL(CAST(IntToFpCast, temp_lo, mSimdFP32Ty), conversionFactor);
- temp_hi = FMUL(CAST(IntToFpCast, temp_hi, mSimdFP32Ty), conversionFactor);
+ temp = FMUL(CAST(IntToFpCast, temp, mSimdFP32Ty), conversionFactor);
}
- vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
+ vVertexElements[currentVertexElement] = temp;
}
currentVertexElement += 1;
}
else
{
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
+ vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
}
if (currentVertexElement > 3)
{
- StoreVertexElements16(pVtxOut, outputElt++, 4, vVertexElements);
+ StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
// reset to the next vVertexElement to output
currentVertexElement = 0;
}
// 256i - 0 1 2 3 4 5 6 7
// xx00 xx00 xx00 xx00 xx00 xx00 xx00 xx00
+ Value* temp = JOIN_16(temp_lo, temp_hi);
+
// denormalize if needed
if (conversionType != CONVERT_NONE)
{
- temp_lo = FMUL(CAST(fpCast, temp_lo, mSimdFP32Ty), conversionFactor);
- temp_hi = FMUL(CAST(fpCast, temp_hi, mSimdFP32Ty), conversionFactor);
+ temp = FMUL(CAST(fpCast, temp, mSimdFP32Ty), conversionFactor);
}
- vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
+ vVertexElements[currentVertexElement] = temp;
currentVertexElement += 1;
}
else
{
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
+ vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
}
if (currentVertexElement > 3)
{
- StoreVertexElements16(pVtxOut, outputElt++, 4, vVertexElements);
+ StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
// reset to the next vVertexElement to output
currentVertexElement = 0;
}
}
}
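// Illustrative sketch, not taken from the SWR sources: scalar model of what the PSHUFB/shuffle
// sequence in Shuffle16bpcGather16 above achieves. Each 32-bit gathered lane holds two packed
// 16-bit components (x|y from the first gather, z|w from the second); the shuffles regroup them so
// each output vector holds one component across all lanes (SoA form). The helper is an assumption.
static void DeswizzlePacked16(const uint32_t (&xyLanes)[8], const uint32_t (&zwLanes)[8],
                              uint16_t (&x)[8], uint16_t (&y)[8],
                              uint16_t (&z)[8], uint16_t (&w)[8])
{
    for (int lane = 0; lane < 8; ++lane)
    {
        x[lane] = static_cast<uint16_t>(xyLanes[lane] & 0xFFFF);   // low 16 bits of the gathered dword
        y[lane] = static_cast<uint16_t>(xyLanes[lane] >> 16);      // high 16 bits
        z[lane] = static_cast<uint16_t>(zwLanes[lane] & 0xFFFF);
        w[lane] = static_cast<uint16_t>(zwLanes[lane] >> 16);
    }
}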
-#else
-#if USE_SIMD16_SHADERS
-void FetchJit::Shuffle16bpcGather(Shuffle16bpcArgs &args, bool useVertexID2)
-#else
void FetchJit::Shuffle16bpcGather(Shuffle16bpcArgs &args)
-#endif
{
// Unpack tuple args
Value* (&vGatherResult)[2] = std::get<0>(args);
}
else
{
-#if USE_SIMD16_SHADERS
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
}
if (currentVertexElement > 3)
}
else
{
-#if USE_SIMD16_SHADERS
- vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
}
if (currentVertexElement > 3)
}
}
-#endif
//////////////////////////////////////////////////////////////////////////
/// @brief Output a simdvertex worth of elements to the current outputElt
/// @param pVtxOut - base address of VIN output struct
/// @param outputElt - simdvertex offset in VIN to write to
/// @param numEltsToStore - number of simdvertex rows to write out
/// @param vVertexElements - LLVM Value*[] simdvertex to write out
-#if USE_SIMD16_GATHERS
-void FetchJit::StoreVertexElements16(Value* pVtxOut, const uint32_t outputElt, const uint32_t numEltsToStore, Value* (&vVertexElements)[4])
-{
- SWR_ASSERT(numEltsToStore <= 4, "Invalid element count.");
-
- for (uint32_t c = 0; c < numEltsToStore; ++c)
- {
- // STORE expects FP32 x vWidth type, just bitcast if needed
- if (!vVertexElements[c]->getType()->getScalarType()->isFloatTy())
- {
-#if FETCH_DUMP_VERTEX
- PRINT("vVertexElements[%d]: 0x%x\n", { C(c), vVertexElements[c] });
-#endif
- vVertexElements[c] = BITCAST(vVertexElements[c], mSimd16FP32Ty);
- }
-#if FETCH_DUMP_VERTEX
- else
- {
- PRINT("vVertexElements[%d]: %f\n", { C(c), vVertexElements[c] });
- }
-#endif
- // outputElt * 4 = offsetting by the size of a simdvertex
- // + c offsets to a 32bit x vWidth row within the current vertex
- Value* dest = GEP(pVtxOut, C(outputElt * 4 + c), "destGEP");
- STORE(vVertexElements[c], dest);
- }
-}
-
-#else
void FetchJit::StoreVertexElements(Value* pVtxOut, const uint32_t outputElt, const uint32_t numEltsToStore, Value* (&vVertexElements)[4])
{
SWR_ASSERT(numEltsToStore <= 4, "Invalid element count.");
#endif
// outputElt * 4 = offsetting by the size of a simdvertex
// + c offsets to a 32bit x vWidth row within the current vertex
-#if USE_SIMD16_SHADERS
- Value* dest = GEP(pVtxOut, C(outputElt * 8 + c * 2), "destGEP");
-#else
Value* dest = GEP(pVtxOut, C(outputElt * 4 + c), "destGEP");
-#endif
STORE(vVertexElements[c], dest);
}
}
-#endif
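// Illustrative sketch, not taken from the SWR sources: scalar model of the destination addressing
// used in StoreVertexElements above. The output is laid out as an array of simdvertex rows; each
// simdvertex holds 4 components, and each component is one SIMD-width row of floats, so component
// c of output element 'outputElt' starts at row (outputElt * 4 + c). The helper is an assumption.
static float* DestRow(float* pVtxOut, uint32_t outputElt, uint32_t component, uint32_t simdWidth)
{
    // advance by whole simdvertex rows, then by the component row within the current vertex
    return pVtxOut + (outputElt * 4 + component) * simdWidth;
}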
//////////////////////////////////////////////////////////////////////////
/// @brief Generates a constant vector of values based on the
/// ComponentControl value
/// @param ctrl - ComponentControl value
-#if USE_SIMD16_GATHERS
-Value *FetchJit::GenerateCompCtrlVector16(const ComponentControl ctrl)
-{
- switch (ctrl)
- {
- case NoStore:
- return VUNDEF_I_16();
- case Store0:
- return VIMMED1_16(0);
- case Store1Fp:
- return VIMMED1_16(1.0f);
- case Store1Int:
- return VIMMED1_16(1);
- case StoreVertexId:
- {
- Value *pId_lo = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), mSimdFP32Ty);
- Value *pId_hi = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 })), mSimdFP32Ty);
-
- Value *pId = JOIN_16(pId_lo, pId_hi);
-
- return pId;
- }
- case StoreInstanceId:
- {
- Value *pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CurInstance })), mFP32Ty);
- return VBROADCAST_16(pId);
- }
-
-
- case StoreSrc:
- default:
- SWR_INVALID("Invalid component control");
- return VUNDEF_I_16();
- }
-}
-
-#else
-#if USE_SIMD16_SHADERS
-Value *FetchJit::GenerateCompCtrlVector(const ComponentControl ctrl, bool useVertexID2)
-#else
Value *FetchJit::GenerateCompCtrlVector(const ComponentControl ctrl)
-#endif
{
switch (ctrl)
{
case Store1Int:
return VIMMED1(1);
case StoreVertexId:
+ {
+ if (mVWidth == 16)
{
-#if USE_SIMD16_SHADERS
- Value *pId;
- if (useVertexID2)
- {
- pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 })), mSimdFP32Ty);
- }
- else
- {
- pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), mSimdFP32Ty);
- }
-#else
- Value *pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), mSimdFP32Ty);
-#endif
- return pId;
+ Type* pSimd8FPTy = VectorType::get(mFP32Ty, 8);
+ Value *pIdLo = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), pSimd8FPTy);
+ Value *pIdHi = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 })), pSimd8FPTy);
+ return JOIN_16(pIdLo, pIdHi);
+ }
+ else
+ {
+ return BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), mSimdFP32Ty);
}
+ }
case StoreInstanceId:
{
Value *pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CurInstance })), mFP32Ty);
}
}
-#endif
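// Illustrative sketch, not taken from the SWR sources: scalar view of how the ComponentControl
// values generated above are consumed. Components the vertex layout does not supply are filled
// with defaults via Store0 / Store1Fp, while StoreSrc keeps the gathered value; the scalar returns
// below are assumptions matching the vector constants in the switch above.
static float DefaultComponent(ComponentControl ctrl, float gathered)
{
    switch (ctrl)
    {
    case StoreSrc:  return gathered;   // keep the fetched value
    case Store0:    return 0.0f;
    case Store1Fp:  return 1.0f;
    default:        return 0.0f;       // NoStore / others: value is unused
    }
}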
//////////////////////////////////////////////////////////////////////////
/// @brief Returns the enable mask for the specified component.
/// @param enableMask - enable bits