/****************************************************************************
 * Copyright (C) 2014-2018 Intel Corporation.   All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * @file lower_x86.cpp
 *
 * @brief llvm pass to lower meta code to x86
 *
 ******************************************************************************/
#include "jit_pch.hpp"
#include "JitManager.h"

#include <unordered_map>
namespace llvm
{
    // forward declare the initializer
    void initializeLowerX86Pass(PassRegistry&);
} // namespace llvm

namespace SwrJit
{
    using namespace llvm;

    enum TargetArch
    {
        AVX    = 0,
        AVX2   = 1,
        AVX512 = 2
    };

    enum TargetWidth
    {
        W256       = 0,
        W512       = 1,
        NUM_WIDTHS = 2
    };

    struct LowerX86;

    typedef std::function<Instruction*(LowerX86*, TargetArch, TargetWidth, CallInst*)> EmuFunc;

    struct X86Intrinsic
    {
        Intrinsic::ID intrin[NUM_WIDTHS];
        EmuFunc       emuFunc;
    };

    // Map of intrinsics that haven't been moved to the new mechanism yet. If used, these get the
    // previous behavior of mapping directly to avx/avx2 intrinsics.
    static std::map<std::string, Intrinsic::ID> intrinsicMap = {
        {"meta.intrinsic.BEXTR_32",  Intrinsic::x86_bmi_bextr_32},
        {"meta.intrinsic.VPSHUFB",   Intrinsic::x86_avx2_pshuf_b},
        {"meta.intrinsic.VCVTPS2PH", Intrinsic::x86_vcvtps2ph_256},
        {"meta.intrinsic.VPTESTC",   Intrinsic::x86_avx_ptestc_256},
        {"meta.intrinsic.VPTESTZ",   Intrinsic::x86_avx_ptestz_256},
        {"meta.intrinsic.VFMADDPS",  Intrinsic::x86_fma_vfmadd_ps_256},
        {"meta.intrinsic.VPHADDD",   Intrinsic::x86_avx2_phadd_d},
        {"meta.intrinsic.PDEP32",    Intrinsic::x86_bmi_pdep_32},
        {"meta.intrinsic.RDTSC",     Intrinsic::x86_rdtsc},
    };

    Instruction* NO_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst);
    Instruction* VPERM_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst);
    Instruction* VGATHER_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst);
    Instruction* VROUND_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst);
    Instruction* VHSUB_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst);

    Instruction* DOUBLE_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst, Intrinsic::ID intrin);

    static Intrinsic::ID DOUBLE = (Intrinsic::ID)-1;
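
    // Per-arch lookup table for the new mechanism, indexed by TargetArch and keyed by intrinsic
    // name. Each entry holds one native intrinsic per TargetWidth plus an emulation callback:
    //   - a valid Intrinsic::ID is called directly for that width,
    //   - DOUBLE means double pump the next smaller width via DOUBLE_EMU,
    //   - not_intrinsic means fall back to the entry's EmuFunc.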
    static std::map<std::string, X86Intrinsic> intrinsicMap2[] = {
        //                            256-wide                                  512-wide
        {   // AVX
            {"meta.intrinsic.VRCPPS",    {{Intrinsic::x86_avx_rcp_ps_256,     DOUBLE},                   NO_EMU}},
            {"meta.intrinsic.VPERMPS",   {{Intrinsic::not_intrinsic,          Intrinsic::not_intrinsic}, VPERM_EMU}},
            {"meta.intrinsic.VPERMD",    {{Intrinsic::not_intrinsic,          Intrinsic::not_intrinsic}, VPERM_EMU}},
            {"meta.intrinsic.VGATHERPD", {{Intrinsic::not_intrinsic,          Intrinsic::not_intrinsic}, VGATHER_EMU}},
            {"meta.intrinsic.VGATHERPS", {{Intrinsic::not_intrinsic,          Intrinsic::not_intrinsic}, VGATHER_EMU}},
            {"meta.intrinsic.VGATHERDD", {{Intrinsic::not_intrinsic,          Intrinsic::not_intrinsic}, VGATHER_EMU}},
            {"meta.intrinsic.VCVTPD2PS", {{Intrinsic::x86_avx_cvt_pd2_ps_256, Intrinsic::not_intrinsic}, NO_EMU}},
            {"meta.intrinsic.VCVTPH2PS", {{Intrinsic::x86_vcvtph2ps_256,      Intrinsic::not_intrinsic}, NO_EMU}},
            {"meta.intrinsic.VROUND",    {{Intrinsic::x86_avx_round_ps_256,   DOUBLE},                   NO_EMU}},
            {"meta.intrinsic.VHSUBPS",   {{Intrinsic::x86_avx_hsub_ps_256,    DOUBLE},                   NO_EMU}},
        },
        {   // AVX2
            {"meta.intrinsic.VRCPPS",    {{Intrinsic::x86_avx_rcp_ps_256,     DOUBLE},                   NO_EMU}},
            {"meta.intrinsic.VPERMPS",   {{Intrinsic::x86_avx2_permps,        Intrinsic::not_intrinsic}, VPERM_EMU}},
            {"meta.intrinsic.VPERMD",    {{Intrinsic::x86_avx2_permd,         Intrinsic::not_intrinsic}, VPERM_EMU}},
            {"meta.intrinsic.VGATHERPD", {{Intrinsic::not_intrinsic,          Intrinsic::not_intrinsic}, VGATHER_EMU}},
            {"meta.intrinsic.VGATHERPS", {{Intrinsic::not_intrinsic,          Intrinsic::not_intrinsic}, VGATHER_EMU}},
            {"meta.intrinsic.VGATHERDD", {{Intrinsic::not_intrinsic,          Intrinsic::not_intrinsic}, VGATHER_EMU}},
            {"meta.intrinsic.VCVTPD2PS", {{Intrinsic::x86_avx_cvt_pd2_ps_256, DOUBLE},                   NO_EMU}},
            {"meta.intrinsic.VCVTPH2PS", {{Intrinsic::x86_vcvtph2ps_256,      Intrinsic::not_intrinsic}, NO_EMU}},
            {"meta.intrinsic.VROUND",    {{Intrinsic::x86_avx_round_ps_256,   DOUBLE},                   NO_EMU}},
            {"meta.intrinsic.VHSUBPS",   {{Intrinsic::x86_avx_hsub_ps_256,    DOUBLE},                   NO_EMU}},
        },
        {   // AVX512
            {"meta.intrinsic.VRCPPS",    {{Intrinsic::x86_avx512_rcp14_ps_256,         Intrinsic::x86_avx512_rcp14_ps_512},         NO_EMU}},
            {"meta.intrinsic.VPERMPS",   {{Intrinsic::x86_avx512_mask_permvar_sf_256,  Intrinsic::x86_avx512_mask_permvar_sf_512},  NO_EMU}},
            {"meta.intrinsic.VPERMD",    {{Intrinsic::x86_avx512_mask_permvar_si_256,  Intrinsic::x86_avx512_mask_permvar_si_512},  NO_EMU}},
            {"meta.intrinsic.VGATHERPD", {{Intrinsic::not_intrinsic,                   Intrinsic::not_intrinsic},                   VGATHER_EMU}},
            {"meta.intrinsic.VGATHERPS", {{Intrinsic::not_intrinsic,                   Intrinsic::not_intrinsic},                   VGATHER_EMU}},
            {"meta.intrinsic.VGATHERDD", {{Intrinsic::not_intrinsic,                   Intrinsic::not_intrinsic},                   VGATHER_EMU}},
            {"meta.intrinsic.VCVTPD2PS", {{Intrinsic::x86_avx512_mask_cvtpd2ps_256,    Intrinsic::x86_avx512_mask_cvtpd2ps_512},    NO_EMU}},
            {"meta.intrinsic.VCVTPH2PS", {{Intrinsic::x86_avx512_mask_vcvtph2ps_256,   Intrinsic::x86_avx512_mask_vcvtph2ps_512},   NO_EMU}},
            {"meta.intrinsic.VROUND",    {{Intrinsic::not_intrinsic,                   Intrinsic::not_intrinsic},                   VROUND_EMU}},
            {"meta.intrinsic.VHSUBPS",   {{Intrinsic::not_intrinsic,                   Intrinsic::not_intrinsic},                   VHSUB_EMU}},
        },
    };

    struct LowerX86 : public FunctionPass
    {
        LowerX86(Builder* b = nullptr)
            : FunctionPass(ID), B(b)
        {
            initializeLowerX86Pass(*PassRegistry::getPassRegistry());

            // Determine target arch
            if (JM()->mArch.AVX512F())
            {
                mTarget = AVX512;
            }
            else if (JM()->mArch.AVX2())
            {
                mTarget = AVX2;
            }
            else if (JM()->mArch.AVX())
            {
                mTarget = AVX;
            }
            else
            {
                SWR_ASSERT(false, "Unsupported AVX architecture.");
                mTarget = AVX;
            }
        }

        // Try to decipher the vector type of the instruction. This does not work properly
        // across all intrinsics, and will have to be rethought. Probably need something
        // similar to llvm's getDeclaration() utility to map a set of inputs to a specific
        // typed intrinsic.
        void GetRequestedWidthAndType(CallInst* pCallInst, const StringRef intrinName, TargetWidth* pWidth, Type** pTy)
        {
            Type* pVecTy = pCallInst->getType();

            // Check for intrinsic specific types
            // VCVTPD2PS type comes from src, not dst
            if (intrinName.equals("meta.intrinsic.VCVTPD2PS"))
            {
                pVecTy = pCallInst->getOperand(0)->getType();
            }

            if (!pVecTy->isVectorTy())
            {
                for (auto& op : pCallInst->arg_operands())
                {
                    if (op.get()->getType()->isVectorTy())
                    {
                        pVecTy = op.get()->getType();
                        break;
                    }
                }
            }
            SWR_ASSERT(pVecTy->isVectorTy(), "Couldn't determine vector size");

            uint32_t width = cast<VectorType>(pVecTy)->getBitWidth();
            switch (width)
            {
            case 256: *pWidth = W256; break;
            case 512: *pWidth = W512; break;
            default: SWR_ASSERT(false, "Unhandled vector width %d", width);
            }

            *pTy = pVecTy->getScalarType();
        }

        Value* GetZeroVec(TargetWidth width, Type* pTy)
        {
            uint32_t numElem = 0;
            switch (width)
            {
            case W256: numElem = 8; break;
            case W512: numElem = 16; break;
            default: SWR_ASSERT(false, "Unhandled vector width type %d\n", width);
            }

            return ConstantVector::getNullValue(VectorType::get(pTy, numElem));
        }
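
        // Build an all-ones execution mask sized to the lane count of the given width
        // (8 lanes -> i8, 16 lanes -> i16), used as the "all lanes active" mask operand.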
        Value* GetMask(TargetWidth width)
        {
            Value* mask;
            switch (width)
            {
            case W256: mask = B->C((uint8_t)-1); break;
            case W512: mask = B->C((uint16_t)-1); break;
            default: SWR_ASSERT(false, "Unhandled vector width type %d\n", width);
            }
            return mask;
        }

        // Convert <N x i1> mask to <N x i32> x86 mask
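        // Sign extension turns each i1 lane into 0 or all-ones in the wider element, which is
        // the per-lane "sign bit set" encoding x86 vector-masked instructions test for.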
        Value* VectorMask(Value* vi1Mask)
        {
            uint32_t numElem = vi1Mask->getType()->getVectorNumElements();
            return B->S_EXT(vi1Mask, VectorType::get(B->mInt32Ty, numElem));
        }
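
        // Lower a call through the per-arch table: use the native intrinsic for the requested
        // width when one exists, double pump the next smaller width when flagged with DOUBLE,
        // otherwise dispatch to the entry's emulation callback.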
        Instruction* ProcessIntrinsicAdvanced(CallInst* pCallInst)
        {
            Function* pFunc = pCallInst->getCalledFunction();
            auto& intrinsic = intrinsicMap2[mTarget][pFunc->getName()];
            TargetWidth vecWidth;
            Type* pElemTy;
            GetRequestedWidthAndType(pCallInst, pFunc->getName(), &vecWidth, &pElemTy);

            // Check if there is a native intrinsic for this instruction
            Intrinsic::ID id = intrinsic.intrin[vecWidth];
            if (id == DOUBLE)
            {
                // Double pump the next smaller SIMD intrinsic
                SWR_ASSERT(vecWidth != 0, "Cannot double pump smallest SIMD width.");
                Intrinsic::ID id2 = intrinsic.intrin[vecWidth - 1];
                SWR_ASSERT(id2 != Intrinsic::not_intrinsic, "Cannot find intrinsic to double pump.");
                return DOUBLE_EMU(this, mTarget, vecWidth, pCallInst, id2);
            }
            else if (id != Intrinsic::not_intrinsic)
            {
                Function* pIntrin = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, id);
                SmallVector<Value*, 8> args;
                for (auto& arg : pCallInst->arg_operands())
                {
                    args.push_back(arg.get());
                }

                // If AVX512, all instructions add a src operand and mask. We'll pass in 0 src and
                // full mask for now. Assuming the intrinsics are consistent and place the src
                // operand and mask last in the argument list.
                if (mTarget == AVX512)
                {
                    if (pFunc->getName().equals("meta.intrinsic.VCVTPD2PS"))
                    {
                        args.push_back(GetZeroVec(W256, pCallInst->getType()->getScalarType()));
                        args.push_back(GetMask(W256));
                        // for AVX512 VCVTPD2PS, we also have to add rounding mode
                        args.push_back(B->C(_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
                    }
                    else
                    {
                        args.push_back(GetZeroVec(vecWidth, pElemTy));
                        args.push_back(GetMask(vecWidth));
                    }
                }

                return B->CALLA(pIntrin, args);
            }
            else
            {
                // No native intrinsic, call emulation function
                return intrinsic.emuFunc(this, mTarget, vecWidth, pCallInst);
            }
        }
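
        // Lower a call through the legacy table: a direct 1:1 mapping to an avx/avx2 intrinsic
        // with the call's arguments forwarded unchanged.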
        Instruction* ProcessIntrinsic(CallInst* pCallInst)
        {
            Function* pFunc = pCallInst->getCalledFunction();

            // Forward to the advanced support if found
            if (intrinsicMap2[mTarget].find(pFunc->getName()) != intrinsicMap2[mTarget].end())
            {
                return ProcessIntrinsicAdvanced(pCallInst);
            }

            SWR_ASSERT(intrinsicMap.find(pFunc->getName()) != intrinsicMap.end(), "Unimplemented intrinsic %s.", pFunc->getName());

            Intrinsic::ID x86Intrinsic = intrinsicMap[pFunc->getName()];
            Function* pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, x86Intrinsic);

            SmallVector<Value*, 8> args;
            for (auto& arg : pCallInst->arg_operands())
            {
                args.push_back(arg.get());
            }
            return B->CALLA(pX86IntrinFunc, args);
        }

        //////////////////////////////////////////////////////////////////////////
        /// @brief LLVM function pass run method.
        /// @param F - The function we're working on with this pass.
        virtual bool runOnFunction(Function& F)
        {
            std::vector<Instruction*> toRemove;

            for (auto& BB : F.getBasicBlockList())
            {
                for (auto& I : BB.getInstList())
                {
                    if (CallInst* pCallInst = dyn_cast<CallInst>(&I))
                    {
                        Function* pFunc = pCallInst->getCalledFunction();
                        if (pFunc)
                        {
                            if (pFunc->getName().startswith("meta.intrinsic"))
                            {
                                B->IRB()->SetInsertPoint(&I);
                                Instruction* pReplace = ProcessIntrinsic(pCallInst);
                                SWR_ASSERT(pReplace);
                                toRemove.push_back(pCallInst);
                                pCallInst->replaceAllUsesWith(pReplace);
                            }
                        }
                    }
                }
            }

            for (auto* pInst : toRemove)
            {
                pInst->eraseFromParent();
            }

            JitManager::DumpToFile(&F, "lowerx86");

            return true;
        }

        virtual void getAnalysisUsage(AnalysisUsage& AU) const
        {
        }

        JitManager* JM() { return B->JM(); }

        Builder* B;

        TargetArch mTarget;

        static char ID; ///< Needed by LLVM to generate ID for FunctionPass.
    };

    char LowerX86::ID = 0; // LLVM uses address of ID as the actual ID.

    FunctionPass* createLowerX86Pass(Builder* b)
    {
        return new LowerX86(b);
    }

    Instruction* NO_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst)
    {
        SWR_ASSERT(false, "Unimplemented intrinsic emulation.");
        return nullptr;
    }

    Instruction* VPERM_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst)
    {
        // Only need vperm emulation for AVX
        SWR_ASSERT(arch == AVX);

        Builder* B = pThis->B;
        auto v32A = pCallInst->getArgOperand(0);
        auto vi32Index = pCallInst->getArgOperand(1);

        Value* v32Result;
        if (isa<Constant>(vi32Index))
        {
            // Can use llvm shuffle vector directly with constant shuffle indices
            v32Result = B->VSHUFFLE(v32A, v32A, vi32Index);
        }
        else
        {
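            // Variable indices: emulate the permute one lane at a time by extracting each
            // index, pulling the selected element out of the source, and inserting it into
            // the result vector.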
            v32Result = UndefValue::get(v32A->getType());
            for (uint32_t l = 0; l < v32A->getType()->getVectorNumElements(); ++l)
            {
                auto i32Index = B->VEXTRACT(vi32Index, B->C(l));
                auto val = B->VEXTRACT(v32A, i32Index);
                v32Result = B->VINSERT(v32Result, val, B->C(l));
            }
        }

        return cast<Instruction>(v32Result);
    }

    Instruction* VGATHER_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst)
    {
        Builder* B = pThis->B;
        auto vSrc = pCallInst->getArgOperand(0);
        auto pBase = pCallInst->getArgOperand(1);
        auto vi32Indices = pCallInst->getArgOperand(2);
        auto vi1Mask = pCallInst->getArgOperand(3);
        auto i8Scale = pCallInst->getArgOperand(4);

        pBase = B->POINTER_CAST(pBase, PointerType::get(B->mInt8Ty, 0));
        uint32_t numElem = vSrc->getType()->getVectorNumElements();
        auto i32Scale = B->Z_EXT(i8Scale, B->mInt32Ty);
        auto srcTy = vSrc->getType()->getVectorElementType();

        Value* v32Gather;
        if (arch == AVX)
        {
            // Full emulation for AVX
            // Store source on stack to provide a valid address to load from inactive lanes
            auto pStack = B->STACKSAVE();
            auto pTmp = B->ALLOCA(vSrc->getType());
            B->STORE(vSrc, pTmp);

            v32Gather = UndefValue::get(vSrc->getType());
            auto vi32Scale = ConstantVector::getSplat(numElem, cast<ConstantInt>(i32Scale));
            auto vi32Offsets = B->MUL(vi32Indices, vi32Scale);
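
            // Scale each index to a byte offset; active lanes load from pBase + offset, while
            // inactive lanes are redirected (via SELECT on the per-lane mask) to the stack copy
            // of the source so they reload their original value from a valid address.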
            for (uint32_t i = 0; i < numElem; ++i)
            {
                auto i32Offset = B->VEXTRACT(vi32Offsets, B->C(i));
                auto pLoadAddress = B->GEP(pBase, i32Offset);
                pLoadAddress = B->BITCAST(pLoadAddress, PointerType::get(srcTy, 0));
                auto pMaskedLoadAddress = B->GEP(pTmp, {0, i});
                auto i1Mask = B->VEXTRACT(vi1Mask, B->C(i));
                auto pValidAddress = B->SELECT(i1Mask, pLoadAddress, pMaskedLoadAddress);
                auto val = B->LOAD(pValidAddress);
                v32Gather = B->VINSERT(v32Gather, val, B->C(i));
            }

            B->STACKRESTORE(pStack);
        }
        else if (arch == AVX2 || (arch == AVX512 && width == W256))
        {
            Function* pX86IntrinFunc;
            if (srcTy == B->mFP32Ty)
            {
                pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx2_gather_d_ps_256);
            }
            else if (srcTy == B->mInt32Ty)
            {
                pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx2_gather_d_d_256);
            }
            else if (srcTy == B->mDoubleTy)
            {
                pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx2_gather_d_q_256);
            }
            else
            {
                SWR_ASSERT(false, "Unsupported vector element type for gather.");
            }

            if (width == W256)
            {
                auto v32Mask = B->BITCAST(pThis->VectorMask(vi1Mask), vSrc->getType());
                v32Gather = B->CALL(pX86IntrinFunc, {vSrc, pBase, vi32Indices, v32Mask, i8Scale});
            }
            else if (width == W512)
            {
                // Double pump 4-wide for 64bit elements
                if (vSrc->getType()->getVectorElementType() == B->mDoubleTy)
                {
                    auto v64Mask = pThis->VectorMask(vi1Mask);
                    v64Mask = B->S_EXT(v64Mask,
                                       VectorType::get(B->mInt64Ty, v64Mask->getType()->getVectorNumElements()));
                    v64Mask = B->BITCAST(v64Mask, vSrc->getType());

                    Value* src0 = B->VSHUFFLE(vSrc, vSrc, B->C({0, 1, 2, 3}));
                    Value* src1 = B->VSHUFFLE(vSrc, vSrc, B->C({4, 5, 6, 7}));

                    Value* indices0 = B->VSHUFFLE(vi32Indices, vi32Indices, B->C({0, 1, 2, 3}));
                    Value* indices1 = B->VSHUFFLE(vi32Indices, vi32Indices, B->C({4, 5, 6, 7}));

                    Value* mask0 = B->VSHUFFLE(v64Mask, v64Mask, B->C({0, 1, 2, 3}));
                    Value* mask1 = B->VSHUFFLE(v64Mask, v64Mask, B->C({4, 5, 6, 7}));

                    src0 = B->BITCAST(src0, VectorType::get(B->mInt64Ty, src0->getType()->getVectorNumElements()));
                    mask0 = B->BITCAST(mask0, VectorType::get(B->mInt64Ty, mask0->getType()->getVectorNumElements()));
                    Value* gather0 = B->CALL(pX86IntrinFunc, {src0, pBase, indices0, mask0, i8Scale});
                    src1 = B->BITCAST(src1, VectorType::get(B->mInt64Ty, src1->getType()->getVectorNumElements()));
                    mask1 = B->BITCAST(mask1, VectorType::get(B->mInt64Ty, mask1->getType()->getVectorNumElements()));
                    Value* gather1 = B->CALL(pX86IntrinFunc, {src1, pBase, indices1, mask1, i8Scale});

                    v32Gather = B->VSHUFFLE(gather0, gather1, B->C({0, 1, 2, 3, 4, 5, 6, 7}));
                    v32Gather = B->BITCAST(v32Gather, vSrc->getType());
                }
                else
                {
                    // Double pump 8-wide for 32bit elements
                    auto v32Mask = pThis->VectorMask(vi1Mask);
                    v32Mask = B->BITCAST(v32Mask, vSrc->getType());
                    Value* src0 = B->EXTRACT_16(vSrc, 0);
                    Value* src1 = B->EXTRACT_16(vSrc, 1);

                    Value* indices0 = B->EXTRACT_16(vi32Indices, 0);
                    Value* indices1 = B->EXTRACT_16(vi32Indices, 1);

                    Value* mask0 = B->EXTRACT_16(v32Mask, 0);
                    Value* mask1 = B->EXTRACT_16(v32Mask, 1);

                    Value* gather0 = B->CALL(pX86IntrinFunc, {src0, pBase, indices0, mask0, i8Scale});
                    Value* gather1 = B->CALL(pX86IntrinFunc, {src1, pBase, indices1, mask1, i8Scale});

                    v32Gather = B->JOIN_16(gather0, gather1);
                }
            }
        }
        else if (arch == AVX512)
        {
            Value* iMask;
            Function* pX86IntrinFunc;
            if (srcTy == B->mFP32Ty)
            {
                pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx512_gather_dps_512);
                iMask = B->BITCAST(vi1Mask, B->mInt16Ty);
            }
            else if (srcTy == B->mInt32Ty)
            {
                pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx512_gather_dpi_512);
                iMask = B->BITCAST(vi1Mask, B->mInt16Ty);
            }
            else if (srcTy == B->mDoubleTy)
            {
                pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx512_gather_dpd_512);
                iMask = B->BITCAST(vi1Mask, B->mInt8Ty);
            }
            else
            {
                SWR_ASSERT(false, "Unsupported vector element type for gather.");
            }

            auto i32Scale = B->Z_EXT(i8Scale, B->mInt32Ty);
            v32Gather = B->CALL(pX86IntrinFunc, {vSrc, pBase, vi32Indices, iMask, i32Scale});
        }

        return cast<Instruction>(v32Gather);
    }

    // No support for vroundps in avx512 (it is available in kncni), so emulate with avx
    // instructions
    Instruction* VROUND_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst)
    {
        SWR_ASSERT(arch == AVX512);

        auto B = pThis->B;
        auto vf32Src = pCallInst->getOperand(0);
        auto i8Round = pCallInst->getOperand(1);
        auto pfnFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx_round_ps_256);

        if (width == W256)
        {
            return cast<Instruction>(B->CALL2(pfnFunc, vf32Src, i8Round));
        }
        else if (width == W512)
        {
            auto v8f32SrcLo = B->EXTRACT_16(vf32Src, 0);
            auto v8f32SrcHi = B->EXTRACT_16(vf32Src, 1);

            auto v8f32ResLo = B->CALL2(pfnFunc, v8f32SrcLo, i8Round);
            auto v8f32ResHi = B->CALL2(pfnFunc, v8f32SrcHi, i8Round);

            return cast<Instruction>(B->JOIN_16(v8f32ResLo, v8f32ResHi));
        }
        else
        {
            SWR_ASSERT(false, "Unimplemented vector width.");
        }

        return nullptr;
    }

    // No support for hsub in AVX512
    Instruction* VHSUB_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst)
    {
        SWR_ASSERT(arch == AVX512);

        auto B = pThis->B;
        auto src0 = pCallInst->getOperand(0);
        auto src1 = pCallInst->getOperand(1);

        // 256b hsub can just use avx intrinsic
        if (width == W256)
        {
            auto pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx_hsub_ps_256);
            return cast<Instruction>(B->CALL2(pX86IntrinFunc, src0, src1));
        }
        else if (width == W512)
        {
            // 512b hsub can be accomplished with shuf/sub combo
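            // The first shuffle collects the even-indexed (minuend) elements and the second the
            // odd-indexed (subtrahend) elements, so the vector SUB below produces the difference
            // of each adjacent pair.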
            auto minuend = B->VSHUFFLE(src0, src1, B->C({0, 2, 8, 10, 4, 6, 12, 14}));
            auto subtrahend = B->VSHUFFLE(src0, src1, B->C({1, 3, 9, 11, 5, 7, 13, 15}));
            return cast<Instruction>(B->SUB(minuend, subtrahend));
        }
        else
        {
            SWR_ASSERT(false, "Unimplemented vector width.");
        }

        return nullptr;
    }

    // Double pump the input using the given 256-wide intrinsic: blindly extract the lower and
    // upper 256 bits from each vector argument, call the 256 wide intrinsic on each half, then
    // merge the results to 512 wide.
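    // The half-extraction below builds a lane-index vector with CInc (consecutive indices
    // starting at i * vecWidth / 2) and shuffles against an undef vector, so the same code
    // handles any vector argument width.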
    Instruction* DOUBLE_EMU(LowerX86* pThis, TargetArch arch, TargetWidth width, CallInst* pCallInst, Intrinsic::ID intrin)
    {
        auto B = pThis->B;
        SWR_ASSERT(width == W512);
        Value* result[2];
        Function* pX86IntrinFunc = Intrinsic::getDeclaration(B->JM()->mpCurrentModule, intrin);
        for (uint32_t i = 0; i < 2; ++i)
        {
            SmallVector<Value*, 8> args;
            for (auto& arg : pCallInst->arg_operands())
            {
                auto argType = arg.get()->getType();
                if (argType->isVectorTy())
                {
                    uint32_t vecWidth = argType->getVectorNumElements();
                    Value* lanes = B->CInc<int>(i * vecWidth / 2, vecWidth / 2);
                    Value* argToPush = B->VSHUFFLE(arg.get(), B->VUNDEF(argType->getVectorElementType(), vecWidth), lanes);
                    args.push_back(argToPush);
                }
                else
                {
                    args.push_back(arg.get());
                }
            }
            result[i] = B->CALLA(pX86IntrinFunc, args);
        }

        uint32_t vecWidth;
        if (result[0]->getType()->isVectorTy())
        {
            assert(result[1]->getType()->isVectorTy());
            vecWidth = result[0]->getType()->getVectorNumElements() +
                       result[1]->getType()->getVectorNumElements();
        }
        else
        {
            vecWidth = 2;
        }

        Value* lanes = B->CInc<int>(0, vecWidth);
        return cast<Instruction>(B->VSHUFFLE(result[0], result[1], lanes));
    }
} // namespace SwrJit

using namespace SwrJit;

INITIALIZE_PASS_BEGIN(LowerX86, "LowerX86", "LowerX86", false, false)
INITIALIZE_PASS_END(LowerX86, "LowerX86", "LowerX86", false, false)