swr: [rasterizer jitter] include cstdarg in builder_misc.cpp
[mesa.git] src/gallium/drivers/swr/rasterizer/jitter/builder_misc.cpp
/****************************************************************************
* Copyright (C) 2014-2015 Intel Corporation. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* @file builder_misc.cpp
*
* @brief Implementation for miscellaneous builder functions
*
* Notes:
*
******************************************************************************/
#include "builder.h"
#include "common/rdtsc_buckets.h"

#include <cstdarg>

namespace SwrJit
{
    void __cdecl CallPrint(const char* fmt, ...);

    //////////////////////////////////////////////////////////////////////////
    /// @brief Convert an IEEE 754 32-bit single precision float to a
    ///        16-bit float with 5 exponent bits and a variable
    ///        number of mantissa bits.
    /// @param val - 32-bit float
    /// @todo Maybe move this outside of this file into a header?
    static uint16_t Convert32To16Float(float val)
    {
        uint32_t sign, exp, mant;
        uint32_t roundBits;

        // Extract the sign, exponent, and mantissa
        uint32_t uf = *(uint32_t*)&val;
        sign = (uf & 0x80000000) >> 31;
        exp = (uf & 0x7F800000) >> 23;
        mant = uf & 0x007FFFFF;

        // Check for out of range
        if (std::isnan(val))
        {
            exp = 0x1F;
            mant = 0x200;
            sign = 1; // set the sign bit for NANs
        }
        else if (std::isinf(val))
        {
            exp = 0x1f;
            mant = 0x0;
        }
        else if (exp > (0x70 + 0x1E)) // Too big to represent -> max representable value
        {
            exp = 0x1E;
            mant = 0x3FF;
        }
        else if ((exp <= 0x70) && (exp >= 0x66)) // It's a denorm
        {
            mant |= 0x00800000;
            for (; exp <= 0x70; mant >>= 1, exp++)
                ;
            exp = 0;
            mant = mant >> 13;
        }
        else if (exp < 0x66) // Too small to represent -> Zero
        {
            exp = 0;
            mant = 0;
        }
        else
        {
            // Saves bits that will be shifted off for rounding
            roundBits = mant & 0x1FFFu;
            // convert exponent and mantissa to 16 bit format
            exp = exp - 0x70;
            mant = mant >> 13;

            // Essentially RTZ, but round up if off by only 1 lsb
            if (roundBits == 0x1FFFu)
            {
                mant++;
                // check for overflow
                if ((mant & 0xC00u) != 0)
                    exp++;
                // make sure only the needed bits are used
                mant &= 0x3FF;
            }
        }

        uint32_t tmpVal = (sign << 15) | (exp << 10) | mant;
        return (uint16_t)tmpVal;
    }
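
    // Example encodings produced by the paths above (binary16 layout: 1 sign
    // bit, 5 exponent bits biased by 15, 10 mantissa bits), worked out by hand:
    //   Convert32To16Float( 1.0f)  == 0x3C00   // normal path
    //   Convert32To16Float(-2.0f)  == 0xC000   // sign + exponent only
    //   Convert32To16Float( 1e9f)  == 0x7BFF   // too big -> max half, not inf
    //   Convert32To16Float( 1e-8f) == 0x0000   // too small -> flushed to zero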

    //////////////////////////////////////////////////////////////////////////
    /// @brief Convert an IEEE 754 16-bit float to a 32-bit single precision
    ///        float
    /// @param val - 16-bit float
    /// @todo Maybe move this outside of this file into a header?
    static float ConvertSmallFloatTo32(UINT val)
    {
        UINT result;
        if ((val & 0x7fff) == 0)
        {
            result = ((uint32_t)(val & 0x8000)) << 16;
        }
        else if ((val & 0x7c00) == 0x7c00)
        {
            result = ((val & 0x3ff) == 0) ? 0x7f800000 : 0x7fc00000;
            result |= ((uint32_t)val & 0x8000) << 16;
        }
        else
        {
            uint32_t sign = (val & 0x8000) << 16;
            uint32_t mant = (val & 0x3ff) << 13;
            uint32_t exp = (val >> 10) & 0x1f;
            if ((exp == 0) && (mant != 0)) // Adjust exponent and mantissa for denormals
            {
                mant <<= 1;
                while (mant < (0x400 << 13))
                {
                    exp--;
                    mant <<= 1;
                }
                mant &= (0x3ff << 13);
            }
            exp = ((exp - 15 + 127) & 0xff) << 23;
            result = sign | exp | mant;
        }

        return *(float*)&result;
    }
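
    // A minimal self-check sketch for the two scalar helpers above. Nothing in
    // the rasterizer calls this; the function name is made up for illustration,
    // and the expected constants follow directly from the binary16 encoding.
    static inline bool HalfFloatHelpersSanityCheck()
    {
        bool ok = true;
        ok &= (Convert32To16Float(0.5f) == 0x3800);     // 0.5 encodes exactly
        ok &= (ConvertSmallFloatTo32(0x3800) == 0.5f);  // and round-trips
        ok &= (ConvertSmallFloatTo32(0xC000) == -2.0f); // sign + exponent only
        return ok;
    }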

    Constant *Builder::C(bool i)
    {
        return ConstantInt::get(IRB()->getInt1Ty(), (i ? 1 : 0));
    }

    Constant *Builder::C(char i)
    {
        return ConstantInt::get(IRB()->getInt8Ty(), i);
    }

    Constant *Builder::C(uint8_t i)
    {
        return ConstantInt::get(IRB()->getInt8Ty(), i);
    }

    Constant *Builder::C(int i)
    {
        return ConstantInt::get(IRB()->getInt32Ty(), i);
    }

    Constant *Builder::C(int64_t i)
    {
        return ConstantInt::get(IRB()->getInt64Ty(), i);
    }

    Constant *Builder::C(uint16_t i)
    {
        return ConstantInt::get(mInt16Ty, i);
    }

    Constant *Builder::C(uint32_t i)
    {
        return ConstantInt::get(IRB()->getInt32Ty(), i);
    }

    Constant *Builder::C(float i)
    {
        return ConstantFP::get(IRB()->getFloatTy(), i);
    }

    Constant *Builder::PRED(bool pred)
    {
        return ConstantInt::get(IRB()->getInt1Ty(), (pred ? 1 : 0));
    }

    Value *Builder::VIMMED1(int i)
    {
        return ConstantVector::getSplat(mVWidth, cast<ConstantInt>(C(i)));
    }

    Value *Builder::VIMMED1(uint32_t i)
    {
        return ConstantVector::getSplat(mVWidth, cast<ConstantInt>(C(i)));
    }

    Value *Builder::VIMMED1(float i)
    {
        return ConstantVector::getSplat(mVWidth, cast<ConstantFP>(C(i)));
    }

    Value *Builder::VIMMED1(bool i)
    {
        return ConstantVector::getSplat(mVWidth, cast<ConstantInt>(C(i)));
    }

    Value *Builder::VUNDEF_IPTR()
    {
        return UndefValue::get(VectorType::get(mInt32PtrTy, mVWidth));
    }

    Value *Builder::VUNDEF_I()
    {
        return UndefValue::get(VectorType::get(mInt32Ty, mVWidth));
    }

    Value *Builder::VUNDEF(Type *ty, uint32_t size)
    {
        return UndefValue::get(VectorType::get(ty, size));
    }

    Value *Builder::VUNDEF_F()
    {
        return UndefValue::get(VectorType::get(mFP32Ty, mVWidth));
    }

    Value *Builder::VUNDEF(Type* t)
    {
        return UndefValue::get(VectorType::get(t, mVWidth));
    }

#if HAVE_LLVM == 0x306
    Value *Builder::VINSERT(Value *vec, Value *val, uint64_t index)
    {
        return VINSERT(vec, val, C((int64_t)index));
    }
#endif

    Value *Builder::VBROADCAST(Value *src)
    {
        // check if src is already a vector
        if (src->getType()->isVectorTy())
        {
            return src;
        }

        return VECTOR_SPLAT(mVWidth, src);
    }
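
    // Example: with mVWidth == 8, VBROADCAST(C(1.0f)) yields the splat constant
    // <8 x float> <1.0, 1.0, ...>; a value that is already a vector is returned
    // unchanged.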

    uint32_t Builder::IMMED(Value* v)
    {
        SWR_ASSERT(isa<ConstantInt>(v));
        ConstantInt *pValConst = cast<ConstantInt>(v);
        return pValConst->getZExtValue();
    }

    int32_t Builder::S_IMMED(Value* v)
    {
        SWR_ASSERT(isa<ConstantInt>(v));
        ConstantInt *pValConst = cast<ConstantInt>(v);
        return pValConst->getSExtValue();
    }

    Value *Builder::GEP(Value* ptr, const std::initializer_list<Value*> &indexList)
    {
        std::vector<Value*> indices;
        for (auto i : indexList)
            indices.push_back(i);
        return GEPA(ptr, indices);
    }

    Value *Builder::GEP(Value* ptr, const std::initializer_list<uint32_t> &indexList)
    {
        std::vector<Value*> indices;
        for (auto i : indexList)
            indices.push_back(C(i));
        return GEPA(ptr, indices);
    }

    LoadInst *Builder::LOAD(Value *basePtr, const std::initializer_list<uint32_t> &indices, const llvm::Twine& name)
    {
        std::vector<Value*> valIndices;
        for (auto i : indices)
            valIndices.push_back(C(i));
        return LOAD(GEPA(basePtr, valIndices), name);
    }

    LoadInst *Builder::LOADV(Value *basePtr, const std::initializer_list<Value*> &indices, const llvm::Twine& name)
    {
        std::vector<Value*> valIndices;
        for (auto i : indices)
            valIndices.push_back(i);
        return LOAD(GEPA(basePtr, valIndices), name);
    }

    StoreInst *Builder::STORE(Value *val, Value *basePtr, const std::initializer_list<uint32_t> &indices)
    {
        std::vector<Value*> valIndices;
        for (auto i : indices)
            valIndices.push_back(C(i));
        return STORE(val, GEPA(basePtr, valIndices));
    }

    StoreInst *Builder::STOREV(Value *val, Value *basePtr, const std::initializer_list<Value*> &indices)
    {
        std::vector<Value*> valIndices;
        for (auto i : indices)
            valIndices.push_back(i);
        return STORE(val, GEPA(basePtr, valIndices));
    }

    CallInst *Builder::CALL(Value *Callee, const std::initializer_list<Value*> &argsList)
    {
        std::vector<Value*> args;
        for (auto arg : argsList)
            args.push_back(arg);
        return CALLA(Callee, args);
    }

#if HAVE_LLVM > 0x306
    CallInst *Builder::CALL(Value *Callee, Value* arg)
    {
        std::vector<Value*> args;
        args.push_back(arg);
        return CALLA(Callee, args);
    }

    CallInst *Builder::CALL2(Value *Callee, Value* arg1, Value* arg2)
    {
        std::vector<Value*> args;
        args.push_back(arg1);
        args.push_back(arg2);
        return CALLA(Callee, args);
    }

    CallInst *Builder::CALL3(Value *Callee, Value* arg1, Value* arg2, Value* arg3)
    {
        std::vector<Value*> args;
        args.push_back(arg1);
        args.push_back(arg2);
        args.push_back(arg3);
        return CALLA(Callee, args);
    }
#endif

    Value *Builder::VRCP(Value *va)
    {
        return FDIV(VIMMED1(1.0f), va); // 1 / a
    }

    Value *Builder::VPLANEPS(Value* vA, Value* vB, Value* vC, Value* &vX, Value* &vY)
    {
        Value* vOut = FMADDPS(vA, vX, vC);
        vOut = FMADDPS(vB, vY, vOut);
        return vOut;
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate an i32 masked load operation in LLVM IR. If not
    /// supported on the underlying platform, emulate it with a float masked load
    /// @param src - base address pointer for the load
    /// @param vMask - SIMD wide mask that controls whether to access memory or load 0
    Value *Builder::MASKLOADD(Value* src, Value* mask)
    {
        Value* vResult;
        // use avx2 masked load instruction if available
        if(JM()->mArch.AVX2())
        {
            Function *func = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_avx2_maskload_d_256);
            vResult = CALL(func, {src, mask});
        }
        else
        {
            // maskload intrinsic expects integer mask operand in llvm >= 3.8
#if (LLVM_VERSION_MAJOR > 3) || (LLVM_VERSION_MAJOR == 3 && LLVM_VERSION_MINOR >= 8)
            mask = BITCAST(mask, VectorType::get(mInt32Ty, mVWidth));
#else
            mask = BITCAST(mask, VectorType::get(mFP32Ty, mVWidth));
#endif
            Function *func = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_avx_maskload_ps_256);
            vResult = BITCAST(CALL(func, {src, mask}), VectorType::get(mInt32Ty, mVWidth));
        }
        return vResult;
    }
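
    // Per-lane behavior of MASKLOADD on both paths: lanes whose mask element
    // has the sign bit set load from memory, masked-off lanes produce 0:
    //   vResult[i] = (mask[i] < 0) ? ((int32_t*)src)[i] : 0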

    //////////////////////////////////////////////////////////////////////////
    /// @brief insert a JIT call to CallPrint
    /// - outputs formatted string to both stdout and VS output window
    /// - DEBUG builds only
    /// Usage example:
    ///   PRINT("index %d = 0x%p\n",{C(lane), pIndex});
    ///   where C(lane) creates a constant value to print, and pIndex is the Value*
    ///   result from a GEP, printing out the pointer to memory
    /// @param printStr - constant string to print, which includes format specifiers
    /// @param printArgs - initializer list of Value*'s to print to stdout
    CallInst *Builder::PRINT(const std::string &printStr, const std::initializer_list<Value*> &printArgs)
    {
        // push the arguments to CallPrint into a vector
        std::vector<Value*> printCallArgs;
        // save room for the format string. we still need to modify it for vectors
        printCallArgs.resize(1);

        // search through the format string for special processing
        size_t pos = 0;
        std::string tempStr(printStr);
        pos = tempStr.find('%', pos);
        auto v = printArgs.begin();

        while ((pos != std::string::npos) && (v != printArgs.end()))
        {
            Value* pArg = *v;
            Type* pType = pArg->getType();

            if (pType->isVectorTy())
            {
                Type* pContainedType = pType->getContainedType(0);

                if (toupper(tempStr[pos + 1]) == 'X')
                {
                    tempStr[pos] = '0';
                    tempStr[pos + 1] = 'x';
                    tempStr.insert(pos + 2, "%08X ");
                    pos += 7;

                    printCallArgs.push_back(VEXTRACT(pArg, C(0)));

                    std::string vectorFormatStr;
                    for (uint32_t i = 1; i < pType->getVectorNumElements(); ++i)
                    {
                        vectorFormatStr += "0x%08X ";
                        printCallArgs.push_back(VEXTRACT(pArg, C(i)));
                    }

                    tempStr.insert(pos, vectorFormatStr);
                    pos += vectorFormatStr.size();
                }
                else if ((tempStr[pos + 1] == 'f') && (pContainedType->isFloatTy()))
                {
                    uint32_t i = 0;
                    for (; i < (pArg->getType()->getVectorNumElements()) - 1; i++)
                    {
                        tempStr.insert(pos, std::string("%f "));
                        pos += 3;
                        printCallArgs.push_back(FP_EXT(VEXTRACT(pArg, C(i)), Type::getDoubleTy(JM()->mContext)));
                    }
                    printCallArgs.push_back(FP_EXT(VEXTRACT(pArg, C(i)), Type::getDoubleTy(JM()->mContext)));
                }
                else if ((tempStr[pos + 1] == 'd') && (pContainedType->isIntegerTy()))
                {
                    uint32_t i = 0;
                    for (; i < (pArg->getType()->getVectorNumElements()) - 1; i++)
                    {
                        tempStr.insert(pos, std::string("%d "));
                        pos += 3;
                        printCallArgs.push_back(VEXTRACT(pArg, C(i)));
                    }
                    printCallArgs.push_back(VEXTRACT(pArg, C(i)));
                }
            }
            else
            {
                if (toupper(tempStr[pos + 1]) == 'X')
                {
                    tempStr[pos] = '0';
                    tempStr.insert(pos + 1, "x%08");
                    printCallArgs.push_back(pArg);
                    pos += 3;
                }
                // for %f we need to cast float Values to doubles so that they print out correctly
                else if ((tempStr[pos + 1] == 'f') && (pType->isFloatTy()))
                {
                    printCallArgs.push_back(FP_EXT(pArg, Type::getDoubleTy(JM()->mContext)));
                    pos++;
                }
                else
                {
                    printCallArgs.push_back(pArg);
                }
            }

            // advance to the next argument
            v++;
            pos = tempStr.find('%', ++pos);
        }

        // create global variable constant string
        Constant *constString = ConstantDataArray::getString(JM()->mContext, tempStr, true);
        GlobalVariable *gvPtr = new GlobalVariable(constString->getType(), true, GlobalValue::InternalLinkage, constString, "printStr");
        JM()->mpCurrentModule->getGlobalList().push_back(gvPtr);

        // get a pointer to the first character in the constant string array
        std::vector<Constant*> geplist{C(0), C(0)};
#if HAVE_LLVM == 0x306
        Constant *strGEP = ConstantExpr::getGetElementPtr(gvPtr, geplist, false);
#else
        Constant *strGEP = ConstantExpr::getGetElementPtr(nullptr, gvPtr, geplist, false);
#endif

        // insert the pointer to the format string in the argument vector
        printCallArgs[0] = strGEP;

        // get pointer to CallPrint function and insert decl into the module if needed
        std::vector<Type*> args;
        args.push_back(PointerType::get(mInt8Ty, 0));
        FunctionType* callPrintTy = FunctionType::get(Type::getVoidTy(JM()->mContext), args, true);
        Function *callPrintFn = cast<Function>(JM()->mpCurrentModule->getOrInsertFunction("CallPrint", callPrintTy));

        // if we haven't yet added the symbol to the symbol table
        if ((sys::DynamicLibrary::SearchForAddressOfSymbol("CallPrint")) == nullptr)
        {
            sys::DynamicLibrary::AddSymbol("CallPrint", (void *)&CallPrint);
        }

        // insert a call to CallPrint
        return CALLA(callPrintFn, printCallArgs);
    }
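
    // Example of the vector expansion PRINT performs: with mVWidth == 8,
    //   PRINT("mask: %x\n", {vMask});
    // rewrites the format string to eight "0x%08X " conversions and pushes one
    // extracted lane per conversion, so a single Value* argument prints all lanes.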

    //////////////////////////////////////////////////////////////////////////
    /// @brief Wrapper around PRINT with initializer list.
    CallInst* Builder::PRINT(const std::string &printStr)
    {
        return PRINT(printStr, {});
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a masked gather operation in LLVM IR. If not
    /// supported on the underlying platform, emulate it with loads
    /// @param vSrc - SIMD wide value that will be loaded if mask is invalid
    /// @param pBase - Int8* base VB address pointer value
    /// @param vIndices - SIMD wide value of VB byte offsets
    /// @param vMask - SIMD wide mask that controls whether to access memory or the src values
    /// @param scale - value to scale indices by
    Value *Builder::GATHERPS(Value* vSrc, Value* pBase, Value* vIndices, Value* vMask, Value* scale)
    {
        Value* vGather;

        // use avx2 gather instruction if available
        if(JM()->mArch.AVX2())
        {
            // force mask to <N x float>, required by vgather
            vMask = BITCAST(vMask, mSimdFP32Ty);
            vGather = VGATHERPS(vSrc, pBase, vIndices, vMask, scale);
        }
        else
        {
            Value* pStack = STACKSAVE();

            // store vSrc on the stack. this way we can select between a valid load address and the vSrc address
            Value* vSrcPtr = ALLOCA(vSrc->getType());
            STORE(vSrc, vSrcPtr);

            vGather = VUNDEF_F();
            Value *vScaleVec = VBROADCAST(Z_EXT(scale, mInt32Ty));
            Value *vOffsets = MUL(vIndices, vScaleVec);
            Value *mask = MASK(vMask);
            for(uint32_t i = 0; i < mVWidth; ++i)
            {
                // single component byte index
                Value *offset = VEXTRACT(vOffsets, C(i));
                // byte pointer to component
                Value *loadAddress = GEP(pBase, offset);
                loadAddress = BITCAST(loadAddress, PointerType::get(mFP32Ty, 0));
                // pointer to the value to load if we're masking off a component
                Value *maskLoadAddress = GEP(vSrcPtr, {C(0), C(i)});
                Value *selMask = VEXTRACT(mask, C(i));
                // switch in a safe address to load if we're trying to access a vertex
                Value *validAddress = SELECT(selMask, loadAddress, maskLoadAddress);
                Value *val = LOAD(validAddress);
                vGather = VINSERT(vGather, val, C(i));
            }
            STACKRESTORE(pStack);
        }

        return vGather;
    }
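
    // Per-lane semantics of GATHERPS on either path:
    //   vGather[i] = mask-bit(vMask[i]) ? *(float*)(pBase + vIndices[i] * scale)
    //                                   : vSrc[i]
    // The emulation spills vSrc to the stack so masked-off lanes can be "loaded"
    // from a safe address instead of touching memory that may be unmapped.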

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a masked gather operation in LLVM IR. If not
    /// supported on the underlying platform, emulate it with loads
    /// @param vSrc - SIMD wide value that will be loaded if mask is invalid
    /// @param pBase - Int8* base VB address pointer value
    /// @param vIndices - SIMD wide value of VB byte offsets
    /// @param vMask - SIMD wide mask that controls whether to access memory or the src values
    /// @param scale - value to scale indices by
    Value *Builder::GATHERDD(Value* vSrc, Value* pBase, Value* vIndices, Value* vMask, Value* scale)
    {
        Value* vGather;

        // use avx2 gather instruction if available
        if(JM()->mArch.AVX2())
        {
            vGather = VGATHERDD(vSrc, pBase, vIndices, vMask, scale);
        }
        else
        {
            Value* pStack = STACKSAVE();

            // store vSrc on the stack. this way we can select between a valid load address and the vSrc address
            Value* vSrcPtr = ALLOCA(vSrc->getType());
            STORE(vSrc, vSrcPtr);

            vGather = VUNDEF_I();
            Value *vScaleVec = VBROADCAST(Z_EXT(scale, mInt32Ty));
            Value *vOffsets = MUL(vIndices, vScaleVec);
            Value *mask = MASK(vMask);
            for(uint32_t i = 0; i < mVWidth; ++i)
            {
                // single component byte index
                Value *offset = VEXTRACT(vOffsets, C(i));
                // byte pointer to component
                Value *loadAddress = GEP(pBase, offset);
                loadAddress = BITCAST(loadAddress, PointerType::get(mInt32Ty, 0));
                // pointer to the value to load if we're masking off a component
                Value *maskLoadAddress = GEP(vSrcPtr, {C(0), C(i)});
                Value *selMask = VEXTRACT(mask, C(i));
                // switch in a safe address to load if we're trying to access a vertex
                Value *validAddress = SELECT(selMask, loadAddress, maskLoadAddress);
                Value *val = LOAD(validAddress, C(0));
                vGather = VINSERT(vGather, val, C(i));
            }

            STACKRESTORE(pStack);
        }
        return vGather;
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief convert x86 <N x float> mask to llvm <N x i1> mask
    Value* Builder::MASK(Value* vmask)
    {
        Value* src = BITCAST(vmask, mSimdInt32Ty);
        return ICMP_SLT(src, VIMMED1(0));
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief convert llvm <N x i1> mask to x86 <N x i32> mask
    Value* Builder::VMASK(Value* mask)
    {
        return S_EXT(mask, mSimdInt32Ty);
    }
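
    // Example round trip (mVWidth == 8): an x86-style mask <8 x i32>
    // <-1, 0, -1, 0, ...> becomes the llvm mask <8 x i1> <1, 0, 1, 0, ...> via
    // MASK (sign-bit test), and VMASK sign-extends it back to all-ones/all-zeros.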

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a VPSHUFB operation in LLVM IR. If not
    /// supported on the underlying platform, emulate it
    /// @param a - 256bit SIMD(32x8bit) of 8bit integer values
    /// @param b - 256bit SIMD(32x8bit) of 8bit integer mask values
    /// Byte masks in the lower 128bit lane of b select 8bit values from the
    /// lower 128bits of a, and vice versa for the upper lanes. If the mask
    /// value is negative, '0' is inserted.
    Value *Builder::PSHUFB(Value* a, Value* b)
    {
        Value* res;
        // use avx2 pshufb instruction if available
        if(JM()->mArch.AVX2())
        {
            res = VPSHUFB(a, b);
        }
        else
        {
            Constant* cB = dyn_cast<Constant>(b);
            // number of 8 bit elements in b
            uint32_t numElms = cast<VectorType>(cB->getType())->getNumElements();
            // output vector
            Value* vShuf = UndefValue::get(VectorType::get(mInt8Ty, numElms));

            // insert an 8 bit value from the high and low lanes of a per loop iteration
            numElms /= 2;
            for(uint32_t i = 0; i < numElms; i++)
            {
                ConstantInt* cLow128b = cast<ConstantInt>(cB->getAggregateElement(i));
                ConstantInt* cHigh128b = cast<ConstantInt>(cB->getAggregateElement(i + numElms));

                // extract values from constant mask
                char valLow128bLane = (char)(cLow128b->getSExtValue());
                char valHigh128bLane = (char)(cHigh128b->getSExtValue());

                Value* insertValLow128b;
                Value* insertValHigh128b;

                // if the mask value is negative, insert a '0' in the respective output position
                // otherwise, lookup the value at mask position (bits 3..0 of the respective mask byte) in a and insert in output vector
                insertValLow128b = (valLow128bLane < 0) ? C((char)0) : VEXTRACT(a, C((valLow128bLane & 0xF)));
                insertValHigh128b = (valHigh128bLane < 0) ? C((char)0) : VEXTRACT(a, C((valHigh128bLane & 0xF) + numElms));

                vShuf = VINSERT(vShuf, insertValLow128b, i);
                vShuf = VINSERT(vShuf, insertValHigh128b, (i + numElms));
            }
            res = vShuf;
        }
        return res;
    }
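
    // Worked example for one byte of the emulated path: a mask byte of 0x83 is
    // negative as a signed char, so that output byte becomes 0; a mask byte of
    // 0x05 in the low 128bit lane selects a[5] (only bits 3..0 of the mask byte
    // are used as the index within the lane).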

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a VPMOVSXBD operation (sign extend 8 8bit values to 32
    /// bits) in LLVM IR. If not supported on the underlying platform, emulate it
    /// @param a - 128bit SIMD lane(16x8bit) of 8bit integer values. Only
    /// lower 8 values are used.
    Value *Builder::PMOVSXBD(Value* a)
    {
        // llvm-3.9 removed the pmovsxbd intrinsic
#if HAVE_LLVM < 0x309
        // use avx2 byte sign extend instruction if available
        if(JM()->mArch.AVX2())
        {
            Function *pmovsxbd = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_avx2_pmovsxbd);
            return CALL(pmovsxbd, std::initializer_list<Value*>{a});
        }
        else
#endif
        {
            // VPMOVSXBD output type
            Type* v8x32Ty = VectorType::get(mInt32Ty, 8);
            // Extract 8 values from 128bit lane and sign extend
            return S_EXT(VSHUFFLE(a, a, C<int>({0, 1, 2, 3, 4, 5, 6, 7})), v8x32Ty);
        }
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a VPMOVSXWD operation (sign extend 8 16bit values to 32
    /// bits) in LLVM IR. If not supported on the underlying platform, emulate it
    /// @param a - 128bit SIMD lane(8x16bit) of 16bit integer values.
    Value *Builder::PMOVSXWD(Value* a)
    {
        // llvm-3.9 removed the pmovsxwd intrinsic
#if HAVE_LLVM < 0x309
        // use avx2 word sign extend if available
        if(JM()->mArch.AVX2())
        {
            Function *pmovsxwd = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_avx2_pmovsxwd);
            return CALL(pmovsxwd, std::initializer_list<Value*>{a});
        }
        else
#endif
        {
            // VPMOVSXWD output type
            Type* v8x32Ty = VectorType::get(mInt32Ty, 8);
            // Extract 8 values from 128bit lane and sign extend
            return S_EXT(VSHUFFLE(a, a, C<int>({0, 1, 2, 3, 4, 5, 6, 7})), v8x32Ty);
        }
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a VPERMD operation (shuffle 32 bit integer values
    /// across 128 bit lanes) in LLVM IR. If not supported on the underlying
    /// platform, emulate it
    /// @param a - 256bit SIMD lane(8x32bit) of integer values.
    /// @param idx - 256bit SIMD lane(8x32bit) of 3 bit lane index values
    Value *Builder::PERMD(Value* a, Value* idx)
    {
        Value* res;
        // use avx2 permute instruction if available
        if(JM()->mArch.AVX2())
        {
            res = VPERMD(a, idx);
        }
        else
        {
            if (isa<Constant>(idx))
            {
                res = VSHUFFLE(a, a, idx);
            }
            else
            {
                res = VUNDEF_I();
                for (uint32_t l = 0; l < JM()->mVWidth; ++l)
                {
                    Value* pIndex = VEXTRACT(idx, C(l));
                    Value* pVal = VEXTRACT(a, pIndex);
                    res = VINSERT(res, pVal, C(l));
                }
            }
        }
        return res;
    }
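
    // Example: with idx = <7, 6, 5, 4, 3, 2, 1, 0>, PERMD reverses the eight
    // 32bit lanes of a; each output lane is res[l] = a[idx[l]], which is what
    // the non-constant emulation loop computes one lane at a time.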

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a VPERMPS operation (shuffle 32 bit float values
    /// across 128 bit lanes) in LLVM IR. If not supported on the underlying
    /// platform, emulate it
    /// @param a - 256bit SIMD lane(8x32bit) of float values.
    /// @param idx - 256bit SIMD lane(8x32bit) of 3 bit lane index values
    Value *Builder::PERMPS(Value* a, Value* idx)
    {
        Value* res;
        // use avx2 permute instruction if available
        if (JM()->mArch.AVX2())
        {
            // llvm 3.6.0 swapped the order of the args to vpermd
            res = VPERMPS(idx, a);
        }
        else
        {
            if (isa<Constant>(idx))
            {
                res = VSHUFFLE(a, a, idx);
            }
            else
            {
                res = VUNDEF_F();
                for (uint32_t l = 0; l < JM()->mVWidth; ++l)
                {
                    Value* pIndex = VEXTRACT(idx, C(l));
                    Value* pVal = VEXTRACT(a, pIndex);
                    res = VINSERT(res, pVal, C(l));
                }
            }
        }

        return res;
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a VCVTPH2PS operation (float16->float32 conversion)
    /// in LLVM IR. If not supported on the underlying platform, emulate it
    /// @param a - 128bit SIMD lane(8x16bit) of float16 in int16 format.
    Value *Builder::CVTPH2PS(Value* a)
    {
        if (JM()->mArch.F16C())
        {
            return VCVTPH2PS(a);
        }
        else
        {
            FunctionType* pFuncTy = FunctionType::get(mFP32Ty, mInt16Ty);
            Function* pCvtPh2Ps = cast<Function>(JM()->mpCurrentModule->getOrInsertFunction("ConvertSmallFloatTo32", pFuncTy));

            if (sys::DynamicLibrary::SearchForAddressOfSymbol("ConvertSmallFloatTo32") == nullptr)
            {
                sys::DynamicLibrary::AddSymbol("ConvertSmallFloatTo32", (void *)&ConvertSmallFloatTo32);
            }

            Value* pResult = UndefValue::get(mSimdFP32Ty);
            for (uint32_t i = 0; i < mVWidth; ++i)
            {
                Value* pSrc = VEXTRACT(a, C(i));
                Value* pConv = CALL(pCvtPh2Ps, std::initializer_list<Value*>{pSrc});
                pResult = VINSERT(pResult, pConv, C(i));
            }

            return pResult;
        }
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a VCVTPS2PH operation (float32->float16 conversion)
    /// in LLVM IR. If not supported on the underlying platform, emulate it
    /// @param a - 256bit SIMD lane(8x32bit) of float32 values to convert.
    Value *Builder::CVTPS2PH(Value* a, Value* rounding)
    {
        if (JM()->mArch.F16C())
        {
            return VCVTPS2PH(a, rounding);
        }
        else
        {
            // call scalar C function for now
            FunctionType* pFuncTy = FunctionType::get(mInt16Ty, mFP32Ty);
            Function* pCvtPs2Ph = cast<Function>(JM()->mpCurrentModule->getOrInsertFunction("Convert32To16Float", pFuncTy));

            if (sys::DynamicLibrary::SearchForAddressOfSymbol("Convert32To16Float") == nullptr)
            {
                sys::DynamicLibrary::AddSymbol("Convert32To16Float", (void *)&Convert32To16Float);
            }

            Value* pResult = UndefValue::get(mSimdInt16Ty);
            for (uint32_t i = 0; i < mVWidth; ++i)
            {
                Value* pSrc = VEXTRACT(a, C(i));
                Value* pConv = CALL(pCvtPs2Ph, std::initializer_list<Value*>{pSrc});
                pResult = VINSERT(pResult, pConv, C(i));
            }

            return pResult;
        }
    }
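
    // Note on the emulated paths above: CVTPH2PS/CVTPS2PH fall back to the
    // file-local scalar helpers one lane at a time, and the scalar fallback
    // ignores the `rounding` operand (Convert32To16Float always rounds the same
    // way), so results can differ from F16C hardware for directed rounding modes.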

    Value *Builder::PMAXSD(Value* a, Value* b)
    {
        // llvm-3.9 removed the pmax intrinsics
#if HAVE_LLVM >= 0x309
        Value* cmp = ICMP_SGT(a, b);
        return SELECT(cmp, a, b);
#else
        if (JM()->mArch.AVX2())
        {
            Function* pmaxsd = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_avx2_pmaxs_d);
            return CALL(pmaxsd, {a, b});
        }
        else
        {
            // use 4-wide sse max intrinsic on lower/upper halves of 8-wide sources
            Function* pmaxsd = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_sse41_pmaxsd);

            // low 128
            Value* aLo = VEXTRACTI128(a, C((uint8_t)0));
            Value* bLo = VEXTRACTI128(b, C((uint8_t)0));
            Value* resLo = CALL(pmaxsd, {aLo, bLo});

            // high 128
            Value* aHi = VEXTRACTI128(a, C((uint8_t)1));
            Value* bHi = VEXTRACTI128(b, C((uint8_t)1));
            Value* resHi = CALL(pmaxsd, {aHi, bHi});

            // combine
            Value* result = VINSERTI128(VUNDEF_I(), resLo, C((uint8_t)0));
            result = VINSERTI128(result, resHi, C((uint8_t)1));

            return result;
        }
#endif
    }

    Value *Builder::PMINSD(Value* a, Value* b)
    {
        // llvm-3.9 removed the pmin intrinsics
#if HAVE_LLVM >= 0x309
        Value* cmp = ICMP_SLT(a, b);
        return SELECT(cmp, a, b);
#else
        if (JM()->mArch.AVX2())
        {
            Function* pminsd = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_avx2_pmins_d);
            return CALL(pminsd, {a, b});
        }
        else
        {
            // use 4-wide sse min intrinsic on lower/upper halves of 8-wide sources
            Function* pminsd = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_sse41_pminsd);

            // low 128
            Value* aLo = VEXTRACTI128(a, C((uint8_t)0));
            Value* bLo = VEXTRACTI128(b, C((uint8_t)0));
            Value* resLo = CALL(pminsd, {aLo, bLo});

            // high 128
            Value* aHi = VEXTRACTI128(a, C((uint8_t)1));
            Value* bHi = VEXTRACTI128(b, C((uint8_t)1));
            Value* resHi = CALL(pminsd, {aHi, bHi});

            // combine
            Value* result = VINSERTI128(VUNDEF_I(), resLo, C((uint8_t)0));
            result = VINSERTI128(result, resHi, C((uint8_t)1));

            return result;
        }
#endif
    }

    void Builder::Gather4(const SWR_FORMAT format, Value* pSrcBase, Value* byteOffsets,
                          Value* mask, Value* vGatherComponents[], bool bPackedOutput)
    {
        const SWR_FORMAT_INFO &info = GetFormatInfo(format);
        if(info.type[0] == SWR_TYPE_FLOAT && info.bpc[0] == 32)
        {
            // ensure our mask is the correct type
            mask = BITCAST(mask, mSimdFP32Ty);
            GATHER4PS(info, pSrcBase, byteOffsets, mask, vGatherComponents, bPackedOutput);
        }
        else
        {
            // ensure our mask is the correct type
            mask = BITCAST(mask, mSimdInt32Ty);
            GATHER4DD(info, pSrcBase, byteOffsets, mask, vGatherComponents, bPackedOutput);
        }
    }

    void Builder::GATHER4PS(const SWR_FORMAT_INFO &info, Value* pSrcBase, Value* byteOffsets,
                            Value* mask, Value* vGatherComponents[], bool bPackedOutput)
    {
        switch(info.bpp / info.numComps)
        {
        case 16:
        {
            Value* vGatherResult[2];
            Value *vMask;

            // TODO: vGatherMaskedVal
            Value* vGatherMaskedVal = VIMMED1((float)0);

            // always have at least one component out of x or y to fetch

            // save mask as it is zero'd out after each gather
            vMask = mask;

            vGatherResult[0] = GATHERPS(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, C((char)1));
            // e.g. result of first 8x32bit integer gather for 16bit components
            // 256i - 0    1    2    3    4    5    6    7
            //        xyxy xyxy xyxy xyxy xyxy xyxy xyxy xyxy
            //

            // if we have at least one component out of z or w to fetch
            if(info.numComps > 2)
            {
                // offset base to the next components(zw) in the vertex to gather
                pSrcBase = GEP(pSrcBase, C((char)4));
                vMask = mask;

                vGatherResult[1] = GATHERPS(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, C((char)1));
                // e.g. result of second 8x32bit integer gather for 16bit components
                // 256i - 0    1    2    3    4    5    6    7
                //        zwzw zwzw zwzw zwzw zwzw zwzw zwzw zwzw
                //
            }
            else
            {
                vGatherResult[1] = vGatherMaskedVal;
            }

            // Shuffle gathered components into place, each row is a component
            Shuffle16bpcGather4(info, vGatherResult, vGatherComponents, bPackedOutput);
        }
        break;
        case 32:
        {
            // apply defaults
            for (uint32_t i = 0; i < 4; ++i)
            {
                vGatherComponents[i] = VIMMED1(*(float*)&info.defaults[i]);
            }

            for(uint32_t i = 0; i < info.numComps; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];

                // save mask as it is zero'd out after each gather
                Value *vMask = mask;

                // Gather a SIMD of components
                vGatherComponents[swizzleIndex] = GATHERPS(vGatherComponents[swizzleIndex], pSrcBase, byteOffsets, vMask, C((char)1));

                // offset base to the next component to gather
                pSrcBase = GEP(pSrcBase, C((char)4));
            }
        }
        break;
        default:
            SWR_ASSERT(0, "Invalid float format");
            break;
        }
    }

    void Builder::GATHER4DD(const SWR_FORMAT_INFO &info, Value* pSrcBase, Value* byteOffsets,
                            Value* mask, Value* vGatherComponents[], bool bPackedOutput)
    {
        switch (info.bpp / info.numComps)
        {
        case 8:
        {
            Value* vGatherMaskedVal = VIMMED1((int32_t)0);
            Value* vGatherResult = GATHERDD(vGatherMaskedVal, pSrcBase, byteOffsets, mask, C((char)1));
            // e.g. result of an 8x32bit integer gather for 8bit components
            // 256i - 0    1    2    3    4    5    6    7
            //        xyzw xyzw xyzw xyzw xyzw xyzw xyzw xyzw

            Shuffle8bpcGather4(info, vGatherResult, vGatherComponents, bPackedOutput);
        }
        break;
        case 16:
        {
            Value* vGatherResult[2];
            Value *vMask;

            // TODO: vGatherMaskedVal
            Value* vGatherMaskedVal = VIMMED1((int32_t)0);

            // always have at least one component out of x or y to fetch

            // save mask as it is zero'd out after each gather
            vMask = mask;

            vGatherResult[0] = GATHERDD(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, C((char)1));
            // e.g. result of first 8x32bit integer gather for 16bit components
            // 256i - 0    1    2    3    4    5    6    7
            //        xyxy xyxy xyxy xyxy xyxy xyxy xyxy xyxy
            //

            // if we have at least one component out of z or w to fetch
            if(info.numComps > 2)
            {
                // offset base to the next components(zw) in the vertex to gather
                pSrcBase = GEP(pSrcBase, C((char)4));
                vMask = mask;

                vGatherResult[1] = GATHERDD(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, C((char)1));
                // e.g. result of second 8x32bit integer gather for 16bit components
                // 256i - 0    1    2    3    4    5    6    7
                //        zwzw zwzw zwzw zwzw zwzw zwzw zwzw zwzw
                //
            }
            else
            {
                vGatherResult[1] = vGatherMaskedVal;
            }

            // Shuffle gathered components into place, each row is a component
            Shuffle16bpcGather4(info, vGatherResult, vGatherComponents, bPackedOutput);

        }
        break;
        case 32:
        {
            // apply defaults
            for (uint32_t i = 0; i < 4; ++i)
            {
                vGatherComponents[i] = VIMMED1((int)info.defaults[i]);
            }

            for(uint32_t i = 0; i < info.numComps; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];

                // save mask as it is zero'd out after each gather
                Value *vMask = mask;

                // Gather a SIMD of components
                vGatherComponents[swizzleIndex] = GATHERDD(vGatherComponents[swizzleIndex], pSrcBase, byteOffsets, vMask, C((char)1));

                // offset base to the next component to gather
                pSrcBase = GEP(pSrcBase, C((char)4));
            }
        }
        break;
        default:
            SWR_ASSERT(0, "unsupported format");
            break;
        }
    }

    void Builder::Shuffle16bpcGather4(const SWR_FORMAT_INFO &info, Value* vGatherInput[2], Value* vGatherOutput[4], bool bPackedOutput)
    {
        // cast types
        Type* vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
        Type* v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits

        // input could either be float or int vector; do shuffle work in int
        vGatherInput[0] = BITCAST(vGatherInput[0], mSimdInt32Ty);
        vGatherInput[1] = BITCAST(vGatherInput[1], mSimdInt32Ty);

        if(bPackedOutput)
        {
            Type* v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), mVWidth / 4); // vwidth is units of 32 bits

            // shuffle mask
            Value* vConstMask = C<char>({0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15,
                                         0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15});
            Value* vShufResult = BITCAST(PSHUFB(BITCAST(vGatherInput[0], v32x8Ty), vConstMask), vGatherTy);
            // after pshufb: group components together in each 128bit lane
            // 256i - 0    1    2    3    4    5    6    7
            //        xxxx xxxx yyyy yyyy xxxx xxxx yyyy yyyy

            Value* vi128XY = BITCAST(PERMD(vShufResult, C<int32_t>({0, 1, 4, 5, 2, 3, 6, 7})), v128bitTy);
            // after PERMD: move and pack xy components into each 128bit lane
            // 256i - 0    1    2    3    4    5    6    7
            //        xxxx xxxx xxxx xxxx yyyy yyyy yyyy yyyy

            // do the same for zw components
            Value* vi128ZW = nullptr;
            if(info.numComps > 2)
            {
                Value* vShufResult = BITCAST(PSHUFB(BITCAST(vGatherInput[1], v32x8Ty), vConstMask), vGatherTy);
                vi128ZW = BITCAST(PERMD(vShufResult, C<int32_t>({0, 1, 4, 5, 2, 3, 6, 7})), v128bitTy);
            }

            for(uint32_t i = 0; i < 4; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];
                // todo: fix for packed
                Value* vGatherMaskedVal = VIMMED1((int32_t)(info.defaults[i]));
                if(i >= info.numComps)
                {
                    // set the default component val
                    vGatherOutput[swizzleIndex] = vGatherMaskedVal;
                    continue;
                }

                // if x or z, extract 128bits from lane 0, else for y or w, extract from lane 1
                uint32_t lane = ((i == 0) || (i == 2)) ? 0 : 1;
                // if x or y, use vi128XY permute result, else use vi128ZW
                Value* selectedPermute = (i < 2) ? vi128XY : vi128ZW;

                // extract packed component 128 bit lanes
                vGatherOutput[swizzleIndex] = VEXTRACT(selectedPermute, C(lane));
            }

        }
        else
        {
            // pshufb masks for each component
            Value* vConstMask[2];
            // x/z shuffle mask
            vConstMask[0] = C<char>({0, 1, -1, -1, 4, 5, -1, -1, 8, 9, -1, -1, 12, 13, -1, -1,
                                     0, 1, -1, -1, 4, 5, -1, -1, 8, 9, -1, -1, 12, 13, -1, -1});

            // y/w shuffle mask
            vConstMask[1] = C<char>({2, 3, -1, -1, 6, 7, -1, -1, 10, 11, -1, -1, 14, 15, -1, -1,
                                     2, 3, -1, -1, 6, 7, -1, -1, 10, 11, -1, -1, 14, 15, -1, -1});


            // shuffle enabled components into lower word of each 32bit lane, 0 extending to 32 bits
            // apply defaults
            for (uint32_t i = 0; i < 4; ++i)
            {
                vGatherOutput[i] = VIMMED1((int32_t)info.defaults[i]);
            }

            for(uint32_t i = 0; i < info.numComps; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];

                // select correct constMask for x/z or y/w pshufb
                uint32_t selectedMask = ((i == 0) || (i == 2)) ? 0 : 1;
                // if x or y, use the first gather result, else use the second (zw)
                uint32_t selectedGather = (i < 2) ? 0 : 1;

                vGatherOutput[swizzleIndex] = BITCAST(PSHUFB(BITCAST(vGatherInput[selectedGather], v32x8Ty), vConstMask[selectedMask]), vGatherTy);
                // after pshufb mask for x channel; z uses the same shuffle from the second gather
                // 256i - 0    1    2    3    4    5    6    7
                //        xx00 xx00 xx00 xx00 xx00 xx00 xx00 xx00
            }
        }
    }

    void Builder::Shuffle8bpcGather4(const SWR_FORMAT_INFO &info, Value* vGatherInput, Value* vGatherOutput[], bool bPackedOutput)
    {
        // cast types
        Type* vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
        Type* v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits

        if(bPackedOutput)
        {
            Type* v128Ty = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), mVWidth / 4); // vwidth is units of 32 bits
            // shuffle mask
            Value* vConstMask = C<char>({0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
                                         0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15});
            Value* vShufResult = BITCAST(PSHUFB(BITCAST(vGatherInput, v32x8Ty), vConstMask), vGatherTy);
            // after pshufb: group components together in each 128bit lane
            // 256i - 0    1    2    3    4    5    6    7
            //        xxxx yyyy zzzz wwww xxxx yyyy zzzz wwww

            Value* vi128XY = BITCAST(PERMD(vShufResult, C<int32_t>({0, 4, 0, 0, 1, 5, 0, 0})), v128Ty);
            // after PERMD: move and pack xy and zw components in low 64 bits of each 128bit lane
            // 256i - 0    1    2    3    4    5    6    7
            //        xxxx xxxx dcdc dcdc yyyy yyyy dcdc dcdc (dc - don't care)

            // do the same for zw components
            Value* vi128ZW = nullptr;
            if(info.numComps > 2)
            {
                vi128ZW = BITCAST(PERMD(vShufResult, C<int32_t>({2, 6, 0, 0, 3, 7, 0, 0})), v128Ty);
            }

            // sign extend all enabled components
            for(uint32_t i = 0; i < 4; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];
                // todo: fix for packed
                Value* vGatherMaskedVal = VIMMED1((int32_t)(info.defaults[i]));
                if(i >= info.numComps)
                {
                    // set the default component val
                    vGatherOutput[swizzleIndex] = vGatherMaskedVal;
                    continue;
                }

                // if x or z, extract 128bits from lane 0, else for y or w, extract from lane 1
                uint32_t lane = ((i == 0) || (i == 2)) ? 0 : 1;
                // if x or y, use vi128XY permute result, else use vi128ZW
                Value* selectedPermute = (i < 2) ? vi128XY : vi128ZW;

                // extract packed component 128 bit lanes
                vGatherOutput[swizzleIndex] = VEXTRACT(selectedPermute, C(lane));
            }
        }
        // else zero extend
        else
        {
            // shuffle enabled components into lower byte of each 32bit lane, 0 extending to 32 bits
            // apply defaults
            for (uint32_t i = 0; i < 4; ++i)
            {
                vGatherOutput[i] = VIMMED1((int32_t)info.defaults[i]);
            }

            for(uint32_t i = 0; i < info.numComps; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];

                // pshufb masks for each component
                Value* vConstMask;
                switch(i)
                {
                case 0:
                    // x shuffle mask
                    vConstMask = C<char>({0, -1, -1, -1, 4, -1, -1, -1, 8, -1, -1, -1, 12, -1, -1, -1,
                                          0, -1, -1, -1, 4, -1, -1, -1, 8, -1, -1, -1, 12, -1, -1, -1});
                    break;
                case 1:
                    // y shuffle mask
                    vConstMask = C<char>({1, -1, -1, -1, 5, -1, -1, -1, 9, -1, -1, -1, 13, -1, -1, -1,
                                          1, -1, -1, -1, 5, -1, -1, -1, 9, -1, -1, -1, 13, -1, -1, -1});
                    break;
                case 2:
                    // z shuffle mask
                    vConstMask = C<char>({2, -1, -1, -1, 6, -1, -1, -1, 10, -1, -1, -1, 14, -1, -1, -1,
                                          2, -1, -1, -1, 6, -1, -1, -1, 10, -1, -1, -1, 14, -1, -1, -1});
                    break;
                case 3:
                    // w shuffle mask
                    vConstMask = C<char>({3, -1, -1, -1, 7, -1, -1, -1, 11, -1, -1, -1, 15, -1, -1, -1,
                                          3, -1, -1, -1, 7, -1, -1, -1, 11, -1, -1, -1, 15, -1, -1, -1});
                    break;
                default:
                    vConstMask = nullptr;
                    break;
                }

                vGatherOutput[swizzleIndex] = BITCAST(PSHUFB(BITCAST(vGatherInput, v32x8Ty), vConstMask), vGatherTy);
                // after pshufb for x channel
                // 256i - 0    1    2    3    4    5    6    7
                //        x000 x000 x000 x000 x000 x000 x000 x000
            }
        }
    }

    // Helper function to create alloca in entry block of function
    Value* Builder::CreateEntryAlloca(Function* pFunc, Type* pType)
    {
        auto saveIP = IRB()->saveIP();
        IRB()->SetInsertPoint(&pFunc->getEntryBlock(),
                              pFunc->getEntryBlock().begin());
        Value* pAlloca = ALLOCA(pType);
        IRB()->restoreIP(saveIP);
        return pAlloca;
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief emulates a scatter operation.
    /// @param pDst - pointer to destination
    /// @param vSrc - vector of src data to scatter
    /// @param vOffsets - vector of byte offsets from pDst
    /// @param vMask - mask of valid lanes
    void Builder::SCATTERPS(Value* pDst, Value* vSrc, Value* vOffsets, Value* vMask)
    {
        /* Scatter algorithm

           while(Index = BitScanForward(mask))
               srcElem = srcVector[Index]
               offsetElem = offsetVector[Index]
               *(pDst + offsetElem) = srcElem
               Update mask (mask &= ~(1 << Index))

        */

        BasicBlock* pCurBB = IRB()->GetInsertBlock();
        Function* pFunc = pCurBB->getParent();
        Type* pSrcTy = vSrc->getType()->getVectorElementType();

        // Store vectors on stack
        if (pScatterStackSrc == nullptr)
        {
            // Save off stack allocations and reuse per scatter. Significantly reduces stack
            // requirements for shaders with a lot of scatters.
            pScatterStackSrc = CreateEntryAlloca(pFunc, mSimdInt64Ty);
            pScatterStackOffsets = CreateEntryAlloca(pFunc, mSimdInt32Ty);
        }

        Value* pSrcArrayPtr = BITCAST(pScatterStackSrc, PointerType::get(vSrc->getType(), 0));
        Value* pOffsetsArrayPtr = pScatterStackOffsets;
        STORE(vSrc, pSrcArrayPtr);
        STORE(vOffsets, pOffsetsArrayPtr);

        // Cast to pointers for random access
        pSrcArrayPtr = POINTER_CAST(pSrcArrayPtr, PointerType::get(pSrcTy, 0));
        pOffsetsArrayPtr = POINTER_CAST(pOffsetsArrayPtr, PointerType::get(mInt32Ty, 0));

        Value* pMask = VMOVMSKPS(BITCAST(vMask, mSimdFP32Ty));

        // Get cttz function
        Function* pfnCttz = Intrinsic::getDeclaration(mpJitMgr->mpCurrentModule, Intrinsic::cttz, { mInt32Ty });

        // Setup loop basic block
        BasicBlock* pLoop = BasicBlock::Create(mpJitMgr->mContext, "Scatter Loop", pFunc);

        // compute first set bit
        Value* pIndex = CALL(pfnCttz, { pMask, C(false) });

        Value* pIsUndef = ICMP_EQ(pIndex, C(32));

        // Split current block
        BasicBlock* pPostLoop = pCurBB->splitBasicBlock(cast<Instruction>(pIsUndef)->getNextNode());

        // Remove unconditional jump created by splitBasicBlock
        pCurBB->getTerminator()->eraseFromParent();

        // Add terminator to end of original block
        IRB()->SetInsertPoint(pCurBB);

        // Add conditional branch
        COND_BR(pIsUndef, pPostLoop, pLoop);

        // Add loop basic block contents
        IRB()->SetInsertPoint(pLoop);
        PHINode* pIndexPhi = PHI(mInt32Ty, 2);
        PHINode* pMaskPhi = PHI(mInt32Ty, 2);

        pIndexPhi->addIncoming(pIndex, pCurBB);
        pMaskPhi->addIncoming(pMask, pCurBB);

        // Extract elements for this index
        Value* pSrcElem = LOADV(pSrcArrayPtr, { pIndexPhi });
        Value* pOffsetElem = LOADV(pOffsetsArrayPtr, { pIndexPhi });

        // GEP to this offset in dst
        Value* pCurDst = GEP(pDst, pOffsetElem);
        pCurDst = POINTER_CAST(pCurDst, PointerType::get(pSrcTy, 0));
        STORE(pSrcElem, pCurDst);

        // Update the mask
        Value* pNewMask = AND(pMaskPhi, NOT(SHL(C(1), pIndexPhi)));

        // Terminator
        Value* pNewIndex = CALL(pfnCttz, { pNewMask, C(false) });

        pIsUndef = ICMP_EQ(pNewIndex, C(32));
        COND_BR(pIsUndef, pPostLoop, pLoop);

        // Update phi edges
        pIndexPhi->addIncoming(pNewIndex, pLoop);
        pMaskPhi->addIncoming(pNewMask, pLoop);

        // Move builder to beginning of post loop
        IRB()->SetInsertPoint(pPostLoop, pPostLoop->begin());
    }
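
    // Example: for a vMask with lanes 0 and 2 enabled (movmsk bits 0b0101), the
    // generated loop runs twice: first with pIndexPhi == 0 (stores vSrc[0] at
    // pDst + vOffsets[0]), then with pIndexPhi == 2; cttz of the cleared mask
    // then returns 32 and control branches to pPostLoop.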

    Value* Builder::VABSPS(Value* a)
    {
        Value* asInt = BITCAST(a, mSimdInt32Ty);
        Value* result = BITCAST(AND(asInt, VIMMED1(0x7fffffff)), mSimdFP32Ty);
        return result;
    }

    Value *Builder::ICLAMP(Value* src, Value* low, Value* high)
    {
        Value *lowCmp = ICMP_SLT(src, low);
        Value *ret = SELECT(lowCmp, low, src);

        Value *highCmp = ICMP_SGT(ret, high);
        ret = SELECT(highCmp, high, ret);

        return ret;
    }

    Value *Builder::FCLAMP(Value* src, Value* low, Value* high)
    {
        Value *lowCmp = FCMP_OLT(src, low);
        Value *ret = SELECT(lowCmp, low, src);

        Value *highCmp = FCMP_OGT(ret, high);
        ret = SELECT(highCmp, high, ret);

        return ret;
    }

    Value *Builder::FCLAMP(Value* src, float low, float high)
    {
        Value* result = VMAXPS(src, VIMMED1(low));
        result = VMINPS(result, VIMMED1(high));

        return result;
    }
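
    // The two FCLAMP overloads compute the same clamp; the Value* form lowers
    // to compare+select pairs while the float form uses VMAXPS/VMINPS with
    // splatted immediates, which typically maps to the x86 maxps/minps
    // instructions.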

    //////////////////////////////////////////////////////////////////////////
    /// @brief save/restore stack, providing ability to push/pop the stack and
    ///        reduce overall stack requirements for temporary stack use
    Value* Builder::STACKSAVE()
    {
        Function* pfnStackSave = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::stacksave);
#if HAVE_LLVM == 0x306
        return CALL(pfnStackSave);
#else
        return CALLA(pfnStackSave);
#endif
    }

    void Builder::STACKRESTORE(Value* pSaved)
    {
        Function* pfnStackRestore = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::stackrestore);
        CALL(pfnStackRestore, std::initializer_list<Value*>{pSaved});
    }

    Value *Builder::FMADDPS(Value* a, Value* b, Value* c)
    {
        Value* vOut;
        // use FMADs if available
        if(JM()->mArch.AVX2())
        {
            vOut = VFMADDPS(a, b, c);
        }
        else
        {
            vOut = FADD(FMUL(a, b), c);
        }
        return vOut;
    }

    Value* Builder::POPCNT(Value* a)
    {
        Function* pCtPop = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::ctpop, { a->getType() });
        return CALL(pCtPop, std::initializer_list<Value*>{a});
    }

    //////////////////////////////////////////////////////////////////////////
    /// @brief C functions called by LLVM IR
    //////////////////////////////////////////////////////////////////////////

    //////////////////////////////////////////////////////////////////////////
    /// @brief called in JIT code, inserted by PRINT
    /// output to both stdout and visual studio debug console
    void __cdecl CallPrint(const char* fmt, ...)
    {
        va_list args;
        va_start(args, fmt);

#if defined( _WIN32 )
        // vprintf consumes args, so keep a copy for the debug-console output
        va_list argsDbg;
        va_copy(argsDbg, args);
#endif

        vprintf(fmt, args);

#if defined( _WIN32 )
        char strBuf[1024];
        vsnprintf_s(strBuf, _TRUNCATE, fmt, argsDbg);
        OutputDebugString(strBuf);
        va_end(argsDbg);
#endif

        va_end(args);
    }

    Value *Builder::VEXTRACTI128(Value* a, Constant* imm8)
    {
#if HAVE_LLVM == 0x306
        Function *func =
            Intrinsic::getDeclaration(JM()->mpCurrentModule,
                                      Intrinsic::x86_avx_vextractf128_si_256);
        return CALL(func, {a, imm8});
#else
        bool flag = !imm8->isZeroValue();
        SmallVector<Constant*, 8> idx;
        for (unsigned i = 0; i < mVWidth / 2; i++) {
            idx.push_back(C(flag ? i + mVWidth / 2 : i));
        }
        return VSHUFFLE(a, VUNDEF_I(), ConstantVector::get(idx));
#endif
    }
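
    // Example (mVWidth == 8): imm8 == 0 selects shuffle indices <0, 1, 2, 3>
    // (the low 128 bits) and imm8 != 0 selects <4, 5, 6, 7> (the high 128 bits),
    // matching the hardware vextractf128 immediate.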

    Value *Builder::VINSERTI128(Value* a, Value* b, Constant* imm8)
    {
#if HAVE_LLVM == 0x306
        Function *func =
            Intrinsic::getDeclaration(JM()->mpCurrentModule,
                                      Intrinsic::x86_avx_vinsertf128_si_256);
        return CALL(func, {a, b, imm8});
#else
        bool flag = !imm8->isZeroValue();
        SmallVector<Constant*, 8> idx;
        for (unsigned i = 0; i < mVWidth; i++) {
            idx.push_back(C(i));
        }
        Value *inter = VSHUFFLE(b, VUNDEF_I(), ConstantVector::get(idx));

        SmallVector<Constant*, 8> idx2;
        for (unsigned i = 0; i < mVWidth / 2; i++) {
            idx2.push_back(C(flag ? i : i + mVWidth));
        }
        for (unsigned i = mVWidth / 2; i < mVWidth; i++) {
            idx2.push_back(C(flag ? i + mVWidth / 2 : i));
        }
        return VSHUFFLE(a, inter, ConstantVector::get(idx2));
#endif
    }
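
    // Example (mVWidth == 8, imm8 == 1): `inter` widens the 128bit b to 8 lanes,
    // and idx2 = <0, 1, 2, 3, 8, 9, 10, 11>, so the result keeps a's low 128
    // bits and places b in the upper 128 bits, matching vinsertf128 with
    // imm8 == 1.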

    // rdtsc buckets macros
    void Builder::RDTSC_START(Value* pBucketMgr, Value* pId)
    {
        // @todo due to an issue with thread local storage propagation in llvm, we can only safely call into
        // buckets framework when single threaded
        if (KNOB_SINGLE_THREADED)
        {
            std::vector<Type*> args{
                PointerType::get(mInt32Ty, 0),   // pBucketMgr
                mInt32Ty                         // id
            };

            FunctionType* pFuncTy = FunctionType::get(Type::getVoidTy(JM()->mContext), args, false);
            Function* pFunc = cast<Function>(JM()->mpCurrentModule->getOrInsertFunction("BucketManager_StartBucket", pFuncTy));
            if (sys::DynamicLibrary::SearchForAddressOfSymbol("BucketManager_StartBucket") == nullptr)
            {
                sys::DynamicLibrary::AddSymbol("BucketManager_StartBucket", (void*)&BucketManager_StartBucket);
            }

            CALL(pFunc, { pBucketMgr, pId });
        }
    }

    void Builder::RDTSC_STOP(Value* pBucketMgr, Value* pId)
    {
        // @todo due to an issue with thread local storage propagation in llvm, we can only safely call into
        // buckets framework when single threaded
        if (KNOB_SINGLE_THREADED)
        {
            std::vector<Type*> args{
                PointerType::get(mInt32Ty, 0),   // pBucketMgr
                mInt32Ty                         // id
            };

            FunctionType* pFuncTy = FunctionType::get(Type::getVoidTy(JM()->mContext), args, false);
            Function* pFunc = cast<Function>(JM()->mpCurrentModule->getOrInsertFunction("BucketManager_StopBucket", pFuncTy));
            if (sys::DynamicLibrary::SearchForAddressOfSymbol("BucketManager_StopBucket") == nullptr)
            {
                sys::DynamicLibrary::AddSymbol("BucketManager_StopBucket", (void*)&BucketManager_StopBucket);
            }

            CALL(pFunc, { pBucketMgr, pId });
        }
    }

}