['VPSHUFB', ['a', 'b'], 'a'],
['VPERMD', ['a', 'idx'], 'a'],
['VPERMPS', ['idx', 'a'], 'a'],
- ['VCVTPD2PS', ['a'], 'VectorType::get(mFP32Ty, VEC_GET_NUM_ELEMS)'],
+ ['VCVTPD2PS', ['a'], 'getVectorType(mFP32Ty, VEC_GET_NUM_ELEMS)'],
['VCVTPS2PH', ['a', 'round'], 'mSimdInt16Ty'],
['VHSUBPS', ['a', 'b'], 'a'],
['VPTESTC', ['a', 'b'], 'mInt32Ty'],
elif type == 'HANDLE':
llvm_type = 'PointerType::get(Type::getInt32Ty(ctx), 0)'
elif type == 'simdscalar':
- llvm_type = 'VectorType::get(Type::getFloatTy(ctx), pJitMgr->mVWidth)'
+ llvm_type = 'getVectorType(Type::getFloatTy(ctx), pJitMgr->mVWidth)'
elif type == 'simdscalari':
- llvm_type = 'VectorType::get(Type::getInt32Ty(ctx), pJitMgr->mVWidth)'
+ llvm_type = 'getVectorType(Type::getInt32Ty(ctx), pJitMgr->mVWidth)'
elif type == 'simd16scalar':
- llvm_type = 'VectorType::get(Type::getFloatTy(ctx), 16)'
+ llvm_type = 'getVectorType(Type::getFloatTy(ctx), 16)'
elif type == 'simd16scalari':
- llvm_type = 'VectorType::get(Type::getInt32Ty(ctx), 16)'
+ llvm_type = 'getVectorType(Type::getInt32Ty(ctx), 16)'
elif type == '__m128i':
- llvm_type = 'VectorType::get(Type::getInt32Ty(ctx), 4)'
+ llvm_type = 'getVectorType(Type::getInt32Ty(ctx), 4)'
elif type == 'SIMD256::Float':
- llvm_type = 'VectorType::get(Type::getFloatTy(ctx), 8)'
+ llvm_type = 'getVectorType(Type::getFloatTy(ctx), 8)'
elif type == 'SIMD256::Integer':
- llvm_type = 'VectorType::get(Type::getInt32Ty(ctx), 8)'
+ llvm_type = 'getVectorType(Type::getInt32Ty(ctx), 8)'
elif type == 'SIMD512::Float':
- llvm_type = 'VectorType::get(Type::getFloatTy(ctx), 16)'
+ llvm_type = 'getVectorType(Type::getFloatTy(ctx), 16)'
elif type == 'SIMD512::Integer':
- llvm_type = 'VectorType::get(Type::getInt32Ty(ctx), 16)'
+ llvm_type = 'getVectorType(Type::getInt32Ty(ctx), 16)'
elif type == 'simdvector':
- llvm_type = 'ArrayType::get(VectorType::get(Type::getFloatTy(ctx), 8), 4)'
+ llvm_type = 'ArrayType::get(getVectorType(Type::getFloatTy(ctx), 8), 4)'
elif type == 'simd16vector':
- llvm_type = 'ArrayType::get(VectorType::get(Type::getFloatTy(ctx), 16), 4)'
+ llvm_type = 'ArrayType::get(getVectorType(Type::getFloatTy(ctx), 16), 4)'
elif type == 'SIMD256::Vec4':
- llvm_type = 'ArrayType::get(VectorType::get(Type::getFloatTy(ctx), 8), 4)'
+ llvm_type = 'ArrayType::get(getVectorType(Type::getFloatTy(ctx), 8), 4)'
elif type == 'SIMD512::Vec4':
- llvm_type = 'ArrayType::get(VectorType::get(Type::getFloatTy(ctx), 16), 4)'
+ llvm_type = 'ArrayType::get(getVectorType(Type::getFloatTy(ctx), 16), 4)'
else:
llvm_type = 'Gen_%s(pJitMgr)' % type
passes.add(createCFGSimplificationPass());
passes.add(createEarlyCSEPass());
passes.add(createInstructionCombiningPass());
+#if LLVM_VERSION_MAJOR <= 11
passes.add(createConstantPropagationPass());
+#endif
passes.add(createSCCPPass());
passes.add(createAggressiveDCEPass());
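createConstantPropagationPass() was removed in LLVM 12, which is why the patch only registers it on LLVM 11 and older; the SCCP pass registered just after it covers most of the same constant folding. A minimal sketch of the guarded registration, using the InstSimplify legacy pass as a hypothetical stand-in on newer LLVM (an assumption for illustration, not something this patch does):

#include "llvm/Config/llvm-config.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Scalar.h"

// Hedged sketch: register a simple folding pass across LLVM versions.
// addConstantFolding is an illustrative helper name, not part of the patch.
static void addConstantFolding(llvm::legacy::PassManagerBase& passes)
{
#if LLVM_VERSION_MAJOR >= 12
    // ConstantPropagation is gone; InstSimplify is one possible stand-in
    // (assumed here; the patch itself simply drops the pass).
    passes.add(llvm::createInstSimplifyLegacyPass());
#else
    passes.add(llvm::createConstantPropagationPass());
#endif
}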
mInt64PtrTy = PointerType::get(mInt64Ty, 0);
mHandleTy = mInt8PtrTy;
- mSimd4FP64Ty = VectorType::get(mDoubleTy, 4);
+ mSimd4FP64Ty = getVectorType(mDoubleTy, 4);
// Built in types: target simd
SetTargetWidth(pJitMgr->mVWidth);
// Built in types: simd16
- mSimd16Int1Ty = VectorType::get(mInt1Ty, mVWidth16);
- mSimd16Int16Ty = VectorType::get(mInt16Ty, mVWidth16);
- mSimd16Int32Ty = VectorType::get(mInt32Ty, mVWidth16);
- mSimd16Int64Ty = VectorType::get(mInt64Ty, mVWidth16);
- mSimd16FP16Ty = VectorType::get(mFP16Ty, mVWidth16);
- mSimd16FP32Ty = VectorType::get(mFP32Ty, mVWidth16);
+ mSimd16Int1Ty = getVectorType(mInt1Ty, mVWidth16);
+ mSimd16Int16Ty = getVectorType(mInt16Ty, mVWidth16);
+ mSimd16Int32Ty = getVectorType(mInt32Ty, mVWidth16);
+ mSimd16Int64Ty = getVectorType(mInt64Ty, mVWidth16);
+ mSimd16FP16Ty = getVectorType(mFP16Ty, mVWidth16);
+ mSimd16FP32Ty = getVectorType(mFP32Ty, mVWidth16);
mSimd16VectorTy = ArrayType::get(mSimd16FP32Ty, 4);
mSimd16VectorTRTy = ArrayType::get(mSimd16FP32Ty, 5);
- mSimd32Int8Ty = VectorType::get(mInt8Ty, 32);
+ mSimd32Int8Ty = getVectorType(mInt8Ty, 32);
if (sizeof(uint32_t*) == 4)
{
{
mVWidth = width;
- mSimdInt1Ty = VectorType::get(mInt1Ty, mVWidth);
- mSimdInt16Ty = VectorType::get(mInt16Ty, mVWidth);
- mSimdInt32Ty = VectorType::get(mInt32Ty, mVWidth);
- mSimdInt64Ty = VectorType::get(mInt64Ty, mVWidth);
- mSimdFP16Ty = VectorType::get(mFP16Ty, mVWidth);
- mSimdFP32Ty = VectorType::get(mFP32Ty, mVWidth);
+ mSimdInt1Ty = getVectorType(mInt1Ty, mVWidth);
+ mSimdInt16Ty = getVectorType(mInt16Ty, mVWidth);
+ mSimdInt32Ty = getVectorType(mInt32Ty, mVWidth);
+ mSimdInt64Ty = getVectorType(mInt64Ty, mVWidth);
+ mSimdFP16Ty = getVectorType(mFP16Ty, mVWidth);
+ mSimdFP32Ty = getVectorType(mFP32Ty, mVWidth);
mSimdVectorTy = ArrayType::get(mSimdFP32Ty, 4);
mSimdVectorIntTy = ArrayType::get(mSimdInt32Ty, 4);
mSimdVectorTRTy = ArrayType::get(mSimdFP32Ty, 5);
}
// <ty> should packetize to <8 x <ty>>
- Type* vecType = VectorType::get(pType, JM()->mVWidth);
+ Type* vecType = getVectorType(pType, JM()->mVWidth);
return vecType;
}
} // namespace SwrJit
bool bPackedOutput)
{
// cast types
- Type* vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
- Type* v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
+ Type* vGatherTy = getVectorType(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
+ Type* v32x8Ty = getVectorType(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
// input could either be float or int vector; do shuffle work in int
vGatherInput[0] = BITCAST(vGatherInput[0], mSimdInt32Ty);
if (bPackedOutput)
{
- Type* v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128),
+ Type* v128bitTy = getVectorType(IntegerType::getIntNTy(JM()->mContext, 128),
mVWidth / 4); // vwidth is units of 32 bits
// shuffle mask
bool bPackedOutput)
{
// cast types
- Type* vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
- Type* v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
+ Type* vGatherTy = getVectorType(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
+ Type* v32x8Ty = getVectorType(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
if (bPackedOutput)
{
- Type* v128Ty = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128),
+ Type* v128Ty = getVectorType(IntegerType::getIntNTy(JM()->mContext, 128),
mVWidth / 4); // vwidth is units of 32 bits
// shuffle mask
Value* vConstMask = C<char>({0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
Value* Builder::VIMMED1(uint64_t i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth, false), cast<ConstantInt>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth, false), cast<ConstantInt>(C(i)));
#else
return ConstantVector::getSplat(mVWidth, cast<ConstantInt>(C(i)));
#endif
Value* Builder::VIMMED1_16(uint64_t i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth16, false), cast<ConstantInt>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth16, false), cast<ConstantInt>(C(i)));
#else
return ConstantVector::getSplat(mVWidth16, cast<ConstantInt>(C(i)));
#endif
Value* Builder::VIMMED1(int i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth, false), cast<ConstantInt>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth, false), cast<ConstantInt>(C(i)));
#else
return ConstantVector::getSplat(mVWidth, cast<ConstantInt>(C(i)));
#endif
Value* Builder::VIMMED1_16(int i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth16, false), cast<ConstantInt>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth16, false), cast<ConstantInt>(C(i)));
#else
return ConstantVector::getSplat(mVWidth16, cast<ConstantInt>(C(i)));
#endif
Value* Builder::VIMMED1(uint32_t i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth, false), cast<ConstantInt>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth, false), cast<ConstantInt>(C(i)));
#else
return ConstantVector::getSplat(mVWidth, cast<ConstantInt>(C(i)));
#endif
Value* Builder::VIMMED1_16(uint32_t i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth16, false), cast<ConstantInt>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth16, false), cast<ConstantInt>(C(i)));
#else
return ConstantVector::getSplat(mVWidth16, cast<ConstantInt>(C(i)));
#endif
Value* Builder::VIMMED1(float i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth, false), cast<ConstantFP>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth, false), cast<ConstantFP>(C(i)));
#else
return ConstantVector::getSplat(mVWidth, cast<ConstantFP>(C(i)));
#endif
Value* Builder::VIMMED1_16(float i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth16, false), cast<ConstantFP>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth16, false), cast<ConstantFP>(C(i)));
#else
return ConstantVector::getSplat(mVWidth16, cast<ConstantFP>(C(i)));
#endif
Value* Builder::VIMMED1(bool i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth, false), cast<ConstantInt>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth, false), cast<ConstantInt>(C(i)));
#else
return ConstantVector::getSplat(mVWidth, cast<ConstantInt>(C(i)));
#endif
Value* Builder::VIMMED1_16(bool i)
{
#if LLVM_VERSION_MAJOR > 10
- return ConstantVector::getSplat(ElementCount(mVWidth16, false), cast<ConstantInt>(C(i)));
+ return ConstantVector::getSplat(ElementCount::get(mVWidth16, false), cast<ConstantInt>(C(i)));
#else
return ConstantVector::getSplat(mVWidth16, cast<ConstantInt>(C(i)));
#endif
}
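All of the VIMMED1/VIMMED1_16 splat helpers above change the same way: on LLVM 11+ ConstantVector::getSplat() takes an ElementCount, and on newer releases that value is built through the ElementCount::get() factory rather than a public constructor, with 'false' selecting a fixed-width (non-scalable) vector. A minimal sketch of the resulting call shape (assuming LLVM 12 headers; splatF32 is an illustrative name, not part of the patch):

#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/TypeSize.h"

// Build a <width x float> splat constant through the ElementCount factory.
static llvm::Constant* splatF32(llvm::LLVMContext& ctx, float v, unsigned width)
{
    llvm::Constant* scalar = llvm::ConstantFP::get(llvm::Type::getFloatTy(ctx), v);
    return llvm::ConstantVector::getSplat(llvm::ElementCount::get(width, false), scalar);
}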
- Value* Builder::VUNDEF_IPTR() { return UndefValue::get(VectorType::get(mInt32PtrTy, mVWidth)); }
+ Value* Builder::VUNDEF_IPTR() { return UndefValue::get(getVectorType(mInt32PtrTy, mVWidth)); }
- Value* Builder::VUNDEF(Type* t) { return UndefValue::get(VectorType::get(t, mVWidth)); }
+ Value* Builder::VUNDEF(Type* t) { return UndefValue::get(getVectorType(t, mVWidth)); }
- Value* Builder::VUNDEF_I() { return UndefValue::get(VectorType::get(mInt32Ty, mVWidth)); }
+ Value* Builder::VUNDEF_I() { return UndefValue::get(getVectorType(mInt32Ty, mVWidth)); }
- Value* Builder::VUNDEF_I_16() { return UndefValue::get(VectorType::get(mInt32Ty, mVWidth16)); }
+ Value* Builder::VUNDEF_I_16() { return UndefValue::get(getVectorType(mInt32Ty, mVWidth16)); }
- Value* Builder::VUNDEF_F() { return UndefValue::get(VectorType::get(mFP32Ty, mVWidth)); }
+ Value* Builder::VUNDEF_F() { return UndefValue::get(getVectorType(mFP32Ty, mVWidth)); }
- Value* Builder::VUNDEF_F_16() { return UndefValue::get(VectorType::get(mFP32Ty, mVWidth16)); }
+ Value* Builder::VUNDEF_F_16() { return UndefValue::get(getVectorType(mFP32Ty, mVWidth16)); }
Value* Builder::VUNDEF(Type* ty, uint32_t size)
{
- return UndefValue::get(VectorType::get(ty, size));
+ return UndefValue::get(getVectorType(ty, size));
}
Value* Builder::VBROADCAST(Value* src, const llvm::Twine& name)
// number of 8 bit elements in b
uint32_t numElms = cast<VectorType>(cB->getType())->getNumElements();
// output vector
- Value* vShuf = UndefValue::get(VectorType::get(mInt8Ty, numElms));
+ Value* vShuf = UndefValue::get(getVectorType(mInt8Ty, numElms));
// insert an 8 bit value from the high and low lanes of a per loop iteration
numElms /= 2;
Value* Builder::PMOVSXBD(Value* a)
{
// VPMOVSXBD output type
- Type* v8x32Ty = VectorType::get(mInt32Ty, 8);
+ Type* v8x32Ty = getVectorType(mInt32Ty, 8);
// Extract 8 values from 128bit lane and sign extend
return S_EXT(VSHUFFLE(a, a, C<int>({0, 1, 2, 3, 4, 5, 6, 7})), v8x32Ty);
}
Value* Builder::PMOVSXWD(Value* a)
{
// VPMOVSXWD output type
- Type* v8x32Ty = VectorType::get(mInt32Ty, 8);
+ Type* v8x32Ty = getVectorType(mInt32Ty, 8);
// Extract 8 values from 128bit lane and sign extend
return S_EXT(VSHUFFLE(a, a, C<int>({0, 1, 2, 3, 4, 5, 6, 7})), v8x32Ty);
}
#else
uint32_t numElems = a->getType()->getVectorNumElements();
#endif
- Value* input = BITCAST(a, VectorType::get(mFP16Ty, numElems));
+ Value* input = BITCAST(a, getVectorType(mFP16Ty, numElems));
- return FP_EXT(input, VectorType::get(mFP32Ty, numElems), name);
+ return FP_EXT(input, getVectorType(mFP32Ty, numElems), name);
}
//////////////////////////////////////////////////////////////////////////
if (fetchState.bDisableIndexOOBCheck)
{
vIndices = LOAD(
- BITCAST(indices, PointerType::get(VectorType::get(mInt8Ty, mpJitMgr->mVWidth), 0)),
+ BITCAST(indices, PointerType::get(getVectorType(mInt8Ty, mpJitMgr->mVWidth), 0)),
{(uint32_t)0});
vIndices = Z_EXT(vIndices, mSimdInt32Ty);
}
if (fetchState.bDisableIndexOOBCheck)
{
vIndices = LOAD(
- BITCAST(indices, PointerType::get(VectorType::get(mInt16Ty, mpJitMgr->mVWidth), 0)),
+ BITCAST(indices, PointerType::get(getVectorType(mInt16Ty, mpJitMgr->mVWidth), 0)),
{(uint32_t)0});
vIndices = Z_EXT(vIndices, mSimdInt32Ty);
}
optPasses.add(createCFGSimplificationPass());
optPasses.add(createEarlyCSEPass());
optPasses.add(createInstructionCombiningPass());
+#if LLVM_VERSION_MAJOR <= 11
optPasses.add(createConstantPropagationPass());
+#endif
optPasses.add(createSCCPPass());
optPasses.add(createAggressiveDCEPass());
const uint32_t(&swizzle)[4] = std::get<9>(args);
// cast types
- Type* vGatherTy = VectorType::get(mInt32Ty, 8);
- Type* v32x8Ty = VectorType::get(mInt8Ty, 32);
+ Type* vGatherTy = getVectorType(mInt32Ty, 8);
+ Type* v32x8Ty = getVectorType(mInt8Ty, 32);
// have to do extra work for sign extending
if ((extendType == Instruction::CastOps::SExt) || (extendType == Instruction::CastOps::SIToFP))
{
- Type* v16x8Ty = VectorType::get(mInt8Ty, 16); // 8x16bit ints in a 128bit lane
- Type* v128Ty = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), 2);
+ Type* v16x8Ty = getVectorType(mInt8Ty, 16); // 16x8bit ints in a 128bit lane
+ Type* v128Ty = getVectorType(IntegerType::getIntNTy(JM()->mContext, 128), 2);
// shuffle mask, including any swizzling
const char x = (char)swizzle[0];
const uint32_t(&swizzle)[4] = std::get<9>(args);
// cast types
- Type* v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
+ Type* v32x8Ty = getVectorType(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
for (uint32_t i = 0; i < 4; i++)
{
Value*(&vVertexElements)[4] = std::get<8>(args);
// cast types
- Type* vGatherTy = VectorType::get(mInt32Ty, 8);
- Type* v32x8Ty = VectorType::get(mInt8Ty, 32);
+ Type* vGatherTy = getVectorType(mInt32Ty, 8);
+ Type* v32x8Ty = getVectorType(mInt8Ty, 32);
// have to do extra work for sign extending
if ((extendType == Instruction::CastOps::SExt) ||
// is this PP float?
bool bFP = (extendType == Instruction::CastOps::FPExt) ? true : false;
- Type* v8x16Ty = VectorType::get(mInt16Ty, 8); // 8x16bit in a 128bit lane
- Type* v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), 2);
+ Type* v8x16Ty = getVectorType(mInt16Ty, 8); // 8x16bit in a 128bit lane
+ Type* v128bitTy = getVectorType(IntegerType::getIntNTy(JM()->mContext, 128), 2);
// shuffle mask
Value* vConstMask = C<uint8_t>({0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15,
Value*(&vVertexElements)[4] = std::get<8>(args);
// cast types
- Type* vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
- Type* v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
+ Type* vGatherTy = getVectorType(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
+ Type* v32x8Ty = getVectorType(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
// have to do extra work for sign extending
if ((extendType == Instruction::CastOps::SExt) ||
// is this PP float?
bool bFP = (extendType == Instruction::CastOps::FPExt) ? true : false;
- Type* v8x16Ty = VectorType::get(mInt16Ty, 8); // 8x16bit in a 128bit lane
- Type* v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128),
+ Type* v8x16Ty = getVectorType(mInt16Ty, 8); // 8x16bit in a 128bit lane
+ Type* v128bitTy = getVectorType(IntegerType::getIntNTy(JM()->mContext, 128),
mVWidth / 4); // vwidth is units of 32 bits
// shuffle mask
{
if (mVWidth == 16)
{
- Type* pSimd8FPTy = VectorType::get(mFP32Ty, 8);
+ Type* pSimd8FPTy = getVectorType(mFP32Ty, 8);
Value* pIdLo =
BITCAST(LOAD(GEP(mpFetchInfo, {0, SWR_FETCH_CONTEXT_VertexID})), pSimd8FPTy);
Value* pIdHi =
SWR_ASSERT(false, "Unhandled vector width type %d\n", width);
}
- return ConstantVector::getNullValue(VectorType::get(pTy, numElem));
+ return ConstantVector::getNullValue(getVectorType(pTy, numElem));
}
Value* GetMask(TargetWidth width)
#else
uint32_t numElem = vi1Mask->getType()->getVectorNumElements();
#endif
- return B->S_EXT(vi1Mask, VectorType::get(B->mInt32Ty, numElem));
+ return B->S_EXT(vi1Mask, getVectorType(B->mInt32Ty, numElem));
}
Instruction* ProcessIntrinsicAdvanced(CallInst* pCallInst)
v32Gather = UndefValue::get(vSrc->getType());
#if LLVM_VERSION_MAJOR > 10
- auto vi32Scale = ConstantVector::getSplat(ElementCount(numElem, false), cast<ConstantInt>(i32Scale));
+ auto vi32Scale = ConstantVector::getSplat(ElementCount::get(numElem, false), cast<ConstantInt>(i32Scale));
#else
auto vi32Scale = ConstantVector::getSplat(numElem, cast<ConstantInt>(i32Scale));
#endif
#else
uint32_t numElem = v64Mask->getType()->getVectorNumElements();
#endif
- v64Mask = B->S_EXT(v64Mask, VectorType::get(B->mInt64Ty, numElem));
+ v64Mask = B->S_EXT(v64Mask, getVectorType(B->mInt64Ty, numElem));
v64Mask = B->BITCAST(v64Mask, vSrc->getType());
Value* src0 = B->VSHUFFLE(vSrc, vSrc, B->C({0, 1, 2, 3}));
uint32_t numElemSrc1 = src1->getType()->getVectorNumElements();
uint32_t numElemMask1 = mask1->getType()->getVectorNumElements();
#endif
- src0 = B->BITCAST(src0, VectorType::get(B->mInt64Ty, numElemSrc0));
- mask0 = B->BITCAST(mask0, VectorType::get(B->mInt64Ty, numElemMask0));
+ src0 = B->BITCAST(src0, getVectorType(B->mInt64Ty, numElemSrc0));
+ mask0 = B->BITCAST(mask0, getVectorType(B->mInt64Ty, numElemMask0));
Value* gather0 =
B->CALL(pX86IntrinFunc, {src0, pBase, indices0, mask0, i8Scale});
- src1 = B->BITCAST(src1, VectorType::get(B->mInt64Ty, numElemSrc1));
- mask1 = B->BITCAST(mask1, VectorType::get(B->mInt64Ty, numElemMask1));
+ src1 = B->BITCAST(src1, getVectorType(B->mInt64Ty, numElemSrc1));
+ mask1 = B->BITCAST(mask1, getVectorType(B->mInt64Ty, numElemMask1));
Value* gather1 =
B->CALL(pX86IntrinFunc, {src1, pBase, indices1, mask1, i8Scale});
v32Gather = B->VSHUFFLE(gather0, gather1, B->C({0, 1, 2, 3, 4, 5, 6, 7}));
}
#endif
+// Compatibility shim for the VectorType::get() API change: newer LLVM expects
+// an explicit 'Scalable' flag, and SWR only ever builds fixed-width vectors.
+static inline llvm::VectorType* getVectorType(llvm::Type *ElementType, unsigned NumElements)
+{
+#if LLVM_VERSION_MAJOR >= 12
+    return llvm::VectorType::get(ElementType, NumElements, false);
+#else
+    return llvm::VectorType::get(ElementType, NumElements);
+#endif
+}
+
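The shim above exists because VectorType::get() gained the 'Scalable' flag as part of LLVM's scalable-vector work, while every call site touched by this patch wants a fixed-width vector. For illustration only (assuming LLVM 11+ headers; not part of the patch), the same fixed <8 x float> type can be spelled either way:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

// Both calls below return the fixed <8 x float> type that the old
// two-argument VectorType::get(elemTy, 8) used to produce.
static void showFixedVectorEquivalents(llvm::LLVMContext& ctx)
{
    llvm::Type* f32 = llvm::Type::getFloatTy(ctx);
    llvm::VectorType* a = llvm::VectorType::get(f32, 8, /*Scalable=*/false);
    llvm::VectorType* b = llvm::FixedVectorType::get(f32, 8);
    (void)a; (void)b;
}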
#if LLVM_VERSION_MAJOR < 7
#pragma pop_macro("DEBUG")
#endif
Value* pAttrib = GEP(pStream, C(4 * decl.attribSlot));
// load 4 components from stream
- Type* simd4Ty = VectorType::get(IRB()->getFloatTy(), 4);
+ Type* simd4Ty = getVectorType(IRB()->getFloatTy(), 4);
Type* simd4PtrTy = PointerType::get(simd4Ty, 0);
pAttrib = BITCAST(pAttrib, simd4PtrTy);
Value* vattrib = LOAD(pAttrib);
passes.add(createCFGSimplificationPass());
passes.add(createEarlyCSEPass());
passes.add(createInstructionCombiningPass());
+#if LLVM_VERSION_MAJOR <= 11
passes.add(createConstantPropagationPass());
+#endif
passes.add(createSCCPPass());
passes.add(createAggressiveDCEPass());
LLVMValueRef vertex_index,
boolean is_aindex_indirect,
LLVMValueRef attrib_index,
- boolean is_sindex_indirect,
LLVMValueRef swizzle_index)
{
swr_tcs_llvm_iface *iface = (swr_tcs_llvm_iface*)tcs_iface;
LLVMValueRef vertex_index,
boolean is_aindex_indirect,
LLVMValueRef attrib_index,
- boolean is_sindex_indirect,
LLVMValueRef swizzle_index,
uint32_t name)
{
LLVMValueRef vertex_index,
boolean is_aindex_indirect,
LLVMValueRef attrib_index,
- boolean is_sindex_indirect,
LLVMValueRef swizzle_index)
{
swr_tes_llvm_iface *iface = (swr_tes_llvm_iface*)tes_iface;
Value *pVertexOffset = MUL(unwrap(emitted_vertices_vec), VIMMED1(vertSize));
Value *vMask = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_mask});
- Value *vMask1 = TRUNC(vMask, VectorType::get(mInt1Ty, mVWidth));
+ Value *vMask1 = TRUNC(vMask, getVectorType(mInt1Ty, mVWidth));
Value *pStack = STACKSAVE();
Value *pTmpPtr = ALLOCA(mFP32Ty, C(4)); // used for dummy write for lane masking
IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
Value *vMask = LOAD(iface->pGsCtx, { 0, SWR_GS_CONTEXT_mask });
- Value *vMask1 = TRUNC(vMask, VectorType::get(mInt1Ty, 8));
+ Value *vMask1 = TRUNC(vMask, getVectorType(mInt1Ty, 8));
uint32_t vertsPerPrim = iface->num_verts_per_prim;
Value *mask = unwrap(mask_vec);
Value *cmpMask = VMASK(ICMP_NE(unwrap(verts_per_prim_vec), VIMMED1(0)));
mask = AND(mask, cmpMask);
- vMask1 = TRUNC(mask, VectorType::get(mInt1Ty, 8));
+ vMask1 = TRUNC(mask, getVectorType(mInt1Ty, 8));
vCount = SUB(vCount, VIMMED1(1));
Value *vOffset = ADD(UDIV(vCount, VIMMED1(8)), VIMMED1(VERTEX_COUNT_SIZE));
Value *vValue = SHL(VIMMED1(1), UREM(vCount, VIMMED1(8)));
- vValue = TRUNC(vValue, VectorType::get(mInt8Ty, 8));
+ vValue = TRUNC(vValue, getVectorType(mInt8Ty, 8));
Value *pStack = STACKSAVE();
Value *pTmpPtr = ALLOCA(mInt8Ty, C(4)); // used for dummy read/write for lane masking
Value* pTessFactors = GEP(pPatch, {C(0), C(ScalarPatch_tessFactors)});
assert(SWR_NUM_OUTER_TESS_FACTORS == 4);
- Value* sys_value_outer_factors = UndefValue::get(VectorType::get(mFP32Ty, 4));
+ Value* sys_value_outer_factors = UndefValue::get(getVectorType(mFP32Ty, 4));
for (unsigned i = 0; i < SWR_NUM_OUTER_TESS_FACTORS; i++) {
Value* v = LOAD(pTessFactors, {0, SWR_TESSELLATION_FACTORS_OuterTessFactors, i});
sys_value_outer_factors = VINSERT(sys_value_outer_factors, v, i, "gl_TessLevelOuter");
system_values.tess_outer = wrap(sys_value_outer_factors);
assert(SWR_NUM_INNER_TESS_FACTORS == 2);
- Value* sys_value_inner_factors = UndefValue::get(VectorType::get(mFP32Ty, 4));
+ Value* sys_value_inner_factors = UndefValue::get(getVectorType(mFP32Ty, 4));
for (unsigned i = 0; i < SWR_NUM_INNER_TESS_FACTORS; i++) {
Value* v = LOAD(pTessFactors, {0, SWR_TESSELLATION_FACTORS_InnerTessFactors, i});
sys_value_inner_factors = VINSERT(sys_value_inner_factors, v, i, "gl_TessLevelInner");