/****************************************************************************
 * Copyright (C) 2014-2015 Intel Corporation.   All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * @file builder_misc.cpp
 *
 * @brief Implementation for miscellaneous builder functions
 *
 ******************************************************************************/
#include "jit_pch.hpp"
#include "builder.h"

namespace SwrJit
{
    void Builder::AssertMemoryUsageParams(Value* ptr, JIT_MEM_CLIENT usage)
    {
        SWR_ASSERT(
            ptr->getType() != mInt64Ty,
            "Address appears to be GFX access. Requires translation through BuilderGfxMem.");
    }
    Value* Builder::GEP(Value* Ptr, Value* Idx, Type* Ty, const Twine& Name)
    {
        return IRB()->CreateGEP(Ptr, Idx, Name);
    }

    Value* Builder::GEP(Type* Ty, Value* Ptr, Value* Idx, const Twine& Name)
    {
        return IRB()->CreateGEP(Ty, Ptr, Idx, Name);
    }
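
    //////////////////////////////////////////////////////////////////////////
    /// @brief GEP from a braced list of indices; uint32_t indices are turned
    /// into LLVM constant ints via C(). For example (hypothetical operand),
    /// GEP(pState, {0, 1}) addresses the second field of the struct that
    /// pState points to.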
    Value* Builder::GEP(Value* ptr, const std::initializer_list<Value*>& indexList, Type* Ty)
    {
        std::vector<Value*> indices;
        for (auto i : indexList)
            indices.push_back(i);
        return GEPA(ptr, indices);
    }

    Value* Builder::GEP(Value* ptr, const std::initializer_list<uint32_t>& indexList, Type* Ty)
    {
        std::vector<Value*> indices;
        for (auto i : indexList)
            indices.push_back(C(i));
        return GEPA(ptr, indices);
    }
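
    //////////////////////////////////////////////////////////////////////////
    /// @brief GEP taking an arbitrary ArrayRef of index values; the
    /// initializer_list overloads above funnel into these.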
    Value* Builder::GEPA(Value* Ptr, ArrayRef<Value*> IdxList, const Twine& Name)
    {
        return IRB()->CreateGEP(Ptr, IdxList, Name);
    }

    Value* Builder::GEPA(Type* Ty, Value* Ptr, ArrayRef<Value*> IdxList, const Twine& Name)
    {
        return IRB()->CreateGEP(Ty, Ptr, IdxList, Name);
    }
    Value* Builder::IN_BOUNDS_GEP(Value* ptr, const std::initializer_list<Value*>& indexList)
    {
        std::vector<Value*> indices;
        for (auto i : indexList)
            indices.push_back(i);
        return IN_BOUNDS_GEP(ptr, indices);
    }

    Value* Builder::IN_BOUNDS_GEP(Value* ptr, const std::initializer_list<uint32_t>& indexList)
    {
        std::vector<Value*> indices;
        for (auto i : indexList)
            indices.push_back(C(i));
        return IN_BOUNDS_GEP(ptr, indices);
    }
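
    //////////////////////////////////////////////////////////////////////////
    /// @brief LOAD overloads mirroring IRBuilder::CreateLoad; each one first
    /// asserts that the address is not an i64 GFX address that should have
    /// gone through BuilderGfxMem.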
    LoadInst* Builder::LOAD(Value* Ptr, const char* Name, Type* Ty, JIT_MEM_CLIENT usage)
    {
        AssertMemoryUsageParams(Ptr, usage);
        return IRB()->CreateLoad(Ptr, Name);
    }

    LoadInst* Builder::LOAD(Value* Ptr, const Twine& Name, Type* Ty, JIT_MEM_CLIENT usage)
    {
        AssertMemoryUsageParams(Ptr, usage);
        return IRB()->CreateLoad(Ptr, Name);
    }

    LoadInst* Builder::LOAD(Type* Ty, Value* Ptr, const Twine& Name, JIT_MEM_CLIENT usage)
    {
        AssertMemoryUsageParams(Ptr, usage);
        return IRB()->CreateLoad(Ty, Ptr, Name);
    }

    LoadInst*
    Builder::LOAD(Value* Ptr, bool isVolatile, const Twine& Name, Type* Ty, JIT_MEM_CLIENT usage)
    {
        AssertMemoryUsageParams(Ptr, usage);
        return IRB()->CreateLoad(Ptr, isVolatile, Name);
    }
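
    // The overload below folds a GEP and a load into one call, e.g.
    // (hypothetical operand) LOAD(pVertexBuffer, {0, 2}) loads the third
    // field of the struct that pVertexBuffer points to.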
    LoadInst* Builder::LOAD(Value*                                 basePtr,
                            const std::initializer_list<uint32_t>& indices,
                            const llvm::Twine&                     name,
                            Type*                                  Ty,
                            JIT_MEM_CLIENT                         usage)
    {
        std::vector<Value*> valIndices;
        for (auto i : indices)
            valIndices.push_back(C(i));
        return Builder::LOAD(GEPA(basePtr, valIndices), name);
    }
    LoadInst* Builder::LOADV(Value*                               basePtr,
                             const std::initializer_list<Value*>& indices,
                             const llvm::Twine&                   name)
    {
        std::vector<Value*> valIndices;
        for (auto i : indices)
            valIndices.push_back(i);
        return LOAD(GEPA(basePtr, valIndices), name);
    }
    StoreInst* Builder::STORE(Value*                                 val,
                              Value*                                 basePtr,
                              const std::initializer_list<uint32_t>& indices,
                              Type*                                  Ty,
                              JIT_MEM_CLIENT                         usage)
    {
        std::vector<Value*> valIndices;
        for (auto i : indices)
            valIndices.push_back(C(i));
        return STORE(val, GEPA(basePtr, valIndices));
    }
    StoreInst*
    Builder::STOREV(Value* val, Value* basePtr, const std::initializer_list<Value*>& indices)
    {
        std::vector<Value*> valIndices;
        for (auto i : indices)
            valIndices.push_back(i);
        return STORE(val, GEPA(basePtr, valIndices));
    }
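
    // OFFSET_TO_NEXT_COMPONENT below is a plain GEP on the supplied base
    // pointer; for the i8* bases used by the gather helpers in this file the
    // constant offset is therefore a byte count.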
    Value* Builder::OFFSET_TO_NEXT_COMPONENT(Value* base, Constant* offset)
    {
        return GEP(base, offset);
    }
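
    //////////////////////////////////////////////////////////////////////////
    /// @brief Read-modify-write helper: loads an i32 from basePtr[indices],
    /// adds i32Incr, and stores the sum back to the same location.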
    Value* Builder::MEM_ADD(Value*                                 i32Incr,
                            Value*                                 basePtr,
                            const std::initializer_list<uint32_t>& indices,
                            const llvm::Twine&                     name)
    {
        Value* i32Value  = LOAD(GEP(basePtr, indices), name);
        Value* i32Result = ADD(i32Value, i32Incr);
        return STORE(i32Result, GEP(basePtr, indices));
    }
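
    // Illustrative use (hypothetical operands): increment a stats counter
    // held in a state structure by one:
    //   MEM_ADD(C(1), pStatsPtr, {0, 3}, "statIncr");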
    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a masked gather operation in LLVM IR. If not
    /// supported on the underlying platform, emulate it with loads
    /// @param vSrc - SIMD wide value that will be loaded if mask is invalid
    /// @param pBase - Int8* base VB address pointer value
    /// @param vIndices - SIMD wide value of VB byte offsets
    /// @param vMask - SIMD wide mask that controls whether to access memory or the src values
    /// @param scale - value to scale indices by
    Value* Builder::GATHERPS(Value*         vSrc,
                             Value*         pBase,
                             Value*         vIndices,
                             Value*         vMask,
                             uint8_t        scale,
                             JIT_MEM_CLIENT usage)
    {
        AssertMemoryUsageParams(pBase, usage);

        return VGATHERPS(vSrc, pBase, vIndices, vMask, C(scale));
    }
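
    // Per the params above, inactive vMask lanes return the matching lane of
    // vSrc rather than touching memory, and each byte offset in vIndices is
    // scaled by 'scale' before being added to pBase.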
    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a masked gather operation in LLVM IR. If not
    /// supported on the underlying platform, emulate it with loads
    /// @param vSrc - SIMD wide value that will be loaded if mask is invalid
    /// @param pBase - Int8* base VB address pointer value
    /// @param vIndices - SIMD wide value of VB byte offsets
    /// @param vMask - SIMD wide mask that controls whether to access memory or the src values
    /// @param scale - value to scale indices by
    Value* Builder::GATHERDD(Value*         vSrc,
                             Value*         pBase,
                             Value*         vIndices,
                             Value*         vMask,
                             uint8_t        scale,
                             JIT_MEM_CLIENT usage)
    {
        AssertMemoryUsageParams(pBase, usage);

        return VGATHERDD(vSrc, pBase, vIndices, vMask, C(scale));
    }
    //////////////////////////////////////////////////////////////////////////
    /// @brief Generate a masked gather operation in LLVM IR. If not
    /// supported on the underlying platform, emulate it with loads
    /// @param vSrc - SIMD wide value that will be loaded if mask is invalid
    /// @param pBase - Int8* base VB address pointer value
    /// @param vIndices - SIMD wide value of VB byte offsets
    /// @param vMask - SIMD wide mask that controls whether to access memory or the src values
    /// @param scale - value to scale indices by
    Value*
    Builder::GATHERPD(Value* vSrc, Value* pBase, Value* vIndices, Value* vMask, uint8_t scale)
    {
        return VGATHERPD(vSrc, pBase, vIndices, vMask, C(scale));
    }
    //////////////////////////////////////////////////////////////////////////
    /// @brief Alternative masked gather where source is a vector of pointers
    /// @param pVecSrcPtr - SIMD wide vector of pointers
    /// @param pVecMask - SIMD active lanes
    /// @param pVecPassthru - SIMD wide vector of values to load when lane is inactive
    Value* Builder::GATHER_PTR(Value* pVecSrcPtr, Value* pVecMask, Value* pVecPassthru)
    {
        return MASKED_GATHER(pVecSrcPtr, 4, pVecMask, pVecPassthru);
    }
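
    // The literal 4 passed to MASKED_GATHER above is assumed to be the
    // element alignment in bytes (i.e. 32-bit elements), matching the
    // alignment operand of llvm.masked.gather.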
    void Builder::Gather4(const SWR_FORMAT format,
                          Value*           pSrcBase,
                          Value*           byteOffsets,
                          Value*           mask,
                          Value*           vGatherComponents[],
                          bool             bPackedOutput,
                          JIT_MEM_CLIENT   usage)
    {
        const SWR_FORMAT_INFO& info = GetFormatInfo(format);
        if (info.type[0] == SWR_TYPE_FLOAT && info.bpc[0] == 32)
        {
            GATHER4PS(info, pSrcBase, byteOffsets, mask, vGatherComponents, bPackedOutput, usage);
        }
        else
        {
            GATHER4DD(info, pSrcBase, byteOffsets, mask, vGatherComponents, bPackedOutput, usage);
        }
    }
    void Builder::GATHER4PS(const SWR_FORMAT_INFO& info,
                            Value*                 pSrcBase,
                            Value*                 byteOffsets,
                            Value*                 vMask,
                            Value*                 vGatherComponents[],
                            bool                   bPackedOutput,
                            JIT_MEM_CLIENT         usage)
    {
        switch (info.bpp / info.numComps)
        {
        case 16:
        {
            Value* vGatherResult[2];

            // TODO: vGatherMaskedVal
            Value* vGatherMaskedVal = VIMMED1((float)0);

            // always have at least one component out of x or y to fetch
            vGatherResult[0] = GATHERPS(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, 1, usage);
            // e.g. result of first 8x32bit integer gather for 16bit components
            // 256i - 0    1    2    3    4    5    6    7
            //        xyxy xyxy xyxy xyxy xyxy xyxy xyxy xyxy

            // if we have at least one component out of z or w to fetch
            if (info.numComps > 2)
            {
                // offset base to the next components (zw) in the vertex to gather
                pSrcBase = OFFSET_TO_NEXT_COMPONENT(pSrcBase, C((intptr_t)4));

                vGatherResult[1] =
                    GATHERPS(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, 1, usage);
                // e.g. result of second 8x32bit integer gather for 16bit components
                // 256i - 0    1    2    3    4    5    6    7
                //        zwzw zwzw zwzw zwzw zwzw zwzw zwzw zwzw
            }
            else
            {
                vGatherResult[1] = vGatherMaskedVal;
            }

            // Shuffle gathered components into place, each row is a component
            Shuffle16bpcGather4(info, vGatherResult, vGatherComponents, bPackedOutput);
        }
        break;
        case 32:
        {
            // apply defaults
            for (uint32_t i = 0; i < 4; ++i)
            {
                vGatherComponents[i] = VIMMED1(*(float*)&info.defaults[i]);
            }

            for (uint32_t i = 0; i < info.numComps; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];

                // Gather a SIMD of components
                vGatherComponents[swizzleIndex] = GATHERPS(
                    vGatherComponents[swizzleIndex], pSrcBase, byteOffsets, vMask, 1, usage);

                // offset base to the next component to gather
                pSrcBase = OFFSET_TO_NEXT_COMPONENT(pSrcBase, C((intptr_t)4));
            }
        }
        break;
        default:
            SWR_INVALID("Invalid float format");
            break;
        }
    }
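
    // Note on the 16bpc path above: each 32-bit gather lane brings back two
    // packed 16-bit components, so the x/y (and z/w) pairs arrive interleaved
    // and are separated afterwards by Shuffle16bpcGather4.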
    void Builder::GATHER4DD(const SWR_FORMAT_INFO& info,
                            Value*                 pSrcBase,
                            Value*                 byteOffsets,
                            Value*                 vMask,
                            Value*                 vGatherComponents[],
                            bool                   bPackedOutput,
                            JIT_MEM_CLIENT         usage)
    {
        switch (info.bpp / info.numComps)
        {
        case 8:
        {
            Value* vGatherMaskedVal = VIMMED1((int32_t)0);
            Value* vGatherResult =
                GATHERDD(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, 1, usage);
            // e.g. result of an 8x32bit integer gather for 8bit components
            // 256i - 0    1    2    3    4    5    6    7
            //        xyzw xyzw xyzw xyzw xyzw xyzw xyzw xyzw

            Shuffle8bpcGather4(info, vGatherResult, vGatherComponents, bPackedOutput);
        }
        break;
        case 16:
        {
            Value* vGatherResult[2];

            // TODO: vGatherMaskedVal
            Value* vGatherMaskedVal = VIMMED1((int32_t)0);

            // always have at least one component out of x or y to fetch
            vGatherResult[0] = GATHERDD(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, 1, usage);
            // e.g. result of first 8x32bit integer gather for 16bit components
            // 256i - 0    1    2    3    4    5    6    7
            //        xyxy xyxy xyxy xyxy xyxy xyxy xyxy xyxy

            // if we have at least one component out of z or w to fetch
            if (info.numComps > 2)
            {
                // offset base to the next components (zw) in the vertex to gather
                pSrcBase = OFFSET_TO_NEXT_COMPONENT(pSrcBase, C((intptr_t)4));

                vGatherResult[1] =
                    GATHERDD(vGatherMaskedVal, pSrcBase, byteOffsets, vMask, 1, usage);
                // e.g. result of second 8x32bit integer gather for 16bit components
                // 256i - 0    1    2    3    4    5    6    7
                //        zwzw zwzw zwzw zwzw zwzw zwzw zwzw zwzw
            }
            else
            {
                vGatherResult[1] = vGatherMaskedVal;
            }

            // Shuffle gathered components into place, each row is a component
            Shuffle16bpcGather4(info, vGatherResult, vGatherComponents, bPackedOutput);
        }
        break;
        case 32:
        {
            // apply defaults
            for (uint32_t i = 0; i < 4; ++i)
            {
                vGatherComponents[i] = VIMMED1((int)info.defaults[i]);
            }

            for (uint32_t i = 0; i < info.numComps; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];

                // Gather a SIMD of components
                vGatherComponents[swizzleIndex] = GATHERDD(
                    vGatherComponents[swizzleIndex], pSrcBase, byteOffsets, vMask, 1, usage);

                // offset base to the next component to gather
                pSrcBase = OFFSET_TO_NEXT_COMPONENT(pSrcBase, C((intptr_t)4));
            }
        }
        break;
        default:
            SWR_INVALID("unsupported format");
            break;
        }
    }
    void Builder::Shuffle16bpcGather4(const SWR_FORMAT_INFO& info,
                                      Value*                 vGatherInput[2],
                                      Value*                 vGatherOutput[4],
                                      bool                   bPackedOutput)
    {
        // cast types
        Type* vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
        Type* v32x8Ty   = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits

        // input could either be float or int vector; do shuffle work in int
        vGatherInput[0] = BITCAST(vGatherInput[0], mSimdInt32Ty);
        vGatherInput[1] = BITCAST(vGatherInput[1], mSimdInt32Ty);

        if (bPackedOutput)
        {
            Type* v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128),
                                              mVWidth / 4); // vwidth is units of 32 bits

            // shuffle mask
            Value* vConstMask = C<char>({0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15,
                                         0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15});
            Value* vShufResult =
                BITCAST(PSHUFB(BITCAST(vGatherInput[0], v32x8Ty), vConstMask), vGatherTy);
            // after pshufb: group components together in each 128bit lane
            // 256i - 0    1    2    3    4    5    6    7
            //        xxxx xxxx yyyy yyyy xxxx xxxx yyyy yyyy

            Value* vi128XY =
                BITCAST(VPERMD(vShufResult, C<int32_t>({0, 1, 4, 5, 2, 3, 6, 7})), v128bitTy);
            // after PERMD: move and pack xy components into each 128bit lane
            // 256i - 0    1    2    3    4    5    6    7
            //        xxxx xxxx xxxx xxxx yyyy yyyy yyyy yyyy

            // do the same for zw components
            Value* vi128ZW = nullptr;
            if (info.numComps > 2)
            {
                Value* vShufResultZW =
                    BITCAST(PSHUFB(BITCAST(vGatherInput[1], v32x8Ty), vConstMask), vGatherTy);
                vi128ZW =
                    BITCAST(VPERMD(vShufResultZW, C<int32_t>({0, 1, 4, 5, 2, 3, 6, 7})), v128bitTy);
            }

            for (uint32_t i = 0; i < 4; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];
                // TODO: fix for packed
                Value* vGatherMaskedVal = VIMMED1((int32_t)(info.defaults[i]));
                if (i >= info.numComps)
                {
                    // set the default component val
                    vGatherOutput[swizzleIndex] = vGatherMaskedVal;
                    continue;
                }

                // if x or z, extract 128bits from lane 0, else for y or w, extract from lane 1
                uint32_t lane = ((i == 0) || (i == 2)) ? 0 : 1;
                // if x or y, use vi128XY permute result, else use vi128ZW
                Value* selectedPermute = (i < 2) ? vi128XY : vi128ZW;

                // extract packed component 128 bit lanes
                vGatherOutput[swizzleIndex] = VEXTRACT(selectedPermute, C(lane));
            }
        }
        else
        {
            // pshufb masks for each component
            Value* vConstMask[2];

            // x/z shuffle mask
            vConstMask[0] = C<char>({0, 1, -1, -1, 4, 5, -1, -1, 8, 9, -1, -1, 12, 13, -1, -1,
                                     0, 1, -1, -1, 4, 5, -1, -1, 8, 9, -1, -1, 12, 13, -1, -1});

            // y/w shuffle mask
            vConstMask[1] = C<char>({2, 3, -1, -1, 6, 7, -1, -1, 10, 11, -1, -1, 14, 15, -1, -1,
                                     2, 3, -1, -1, 6, 7, -1, -1, 10, 11, -1, -1, 14, 15, -1, -1});

            // shuffle enabled components into lower word of each 32bit lane, 0 extending to 32 bits
            // apply defaults
            for (uint32_t i = 0; i < 4; ++i)
            {
                vGatherOutput[i] = VIMMED1((int32_t)info.defaults[i]);
            }

            for (uint32_t i = 0; i < info.numComps; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];

                // select correct constMask for x/z or y/w pshufb
                uint32_t selectedMask = ((i == 0) || (i == 2)) ? 0 : 1;
                // if x or y, use the first gather result, else the second
                uint32_t selectedGather = (i < 2) ? 0 : 1;

                vGatherOutput[swizzleIndex] =
                    BITCAST(PSHUFB(BITCAST(vGatherInput[selectedGather], v32x8Ty),
                                   vConstMask[selectedMask]),
                            vGatherTy);
                // after pshufb mask for x channel; z uses the same shuffle from the second gather
                // 256i - 0    1    2    3    4    5    6    7
                //        xx00 xx00 xx00 xx00 xx00 xx00 xx00 xx00
            }
        }
    }
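
    // PSHUFB shuffles bytes independently within each 128-bit half of a
    // 256-bit register, which is why every 32-byte shuffle mask above
    // repeats the same 16-byte pattern twice.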
    void Builder::Shuffle8bpcGather4(const SWR_FORMAT_INFO& info,
                                     Value*                 vGatherInput,
                                     Value*                 vGatherOutput[],
                                     bool                   bPackedOutput)
    {
        // cast types
        Type* vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
        Type* v32x8Ty   = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits

        if (bPackedOutput)
        {
            Type* v128Ty = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128),
                                           mVWidth / 4); // vwidth is units of 32 bits

            // shuffle mask
            Value* vConstMask = C<char>({0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
                                         0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15});
            Value* vShufResult =
                BITCAST(PSHUFB(BITCAST(vGatherInput, v32x8Ty), vConstMask), vGatherTy);
            // after pshufb: group components together in each 128bit lane
            // 256i - 0    1    2    3    4    5    6    7
            //        xxxx yyyy zzzz wwww xxxx yyyy zzzz wwww

            Value* vi128XY =
                BITCAST(VPERMD(vShufResult, C<int32_t>({0, 4, 0, 0, 1, 5, 0, 0})), v128Ty);
            // after PERMD: move and pack xy and zw components in low 64 bits of each 128bit lane
            // 256i - 0    1    2    3    4    5    6    7
            //        xxxx xxxx dcdc dcdc yyyy yyyy dcdc dcdc (dc - don't care)

            // do the same for zw components
            Value* vi128ZW = nullptr;
            if (info.numComps > 2)
            {
                vi128ZW =
                    BITCAST(VPERMD(vShufResult, C<int32_t>({2, 6, 0, 0, 3, 7, 0, 0})), v128Ty);
            }

            // sign extend all enabled components. If we have a fill vVertexElements, output to
            // current simdvertex
            for (uint32_t i = 0; i < 4; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];
                // TODO: fix for packed
                Value* vGatherMaskedVal = VIMMED1((int32_t)(info.defaults[i]));
                if (i >= info.numComps)
                {
                    // set the default component val
                    vGatherOutput[swizzleIndex] = vGatherMaskedVal;
                    continue;
                }

                // if x or z, extract 128bits from lane 0, else for y or w, extract from lane 1
                uint32_t lane = ((i == 0) || (i == 2)) ? 0 : 1;
                // if x or y, use vi128XY permute result, else use vi128ZW
                Value* selectedPermute = (i < 2) ? vi128XY : vi128ZW;

                // extract packed component 128 bit lanes
                vGatherOutput[swizzleIndex] = VEXTRACT(selectedPermute, C(lane));
            }
        }
        else
        {
            // shuffle enabled components into lower byte of each 32bit lane, 0 extending to 32 bits
            // apply defaults
            for (uint32_t i = 0; i < 4; ++i)
            {
                vGatherOutput[i] = VIMMED1((int32_t)info.defaults[i]);
            }

            for (uint32_t i = 0; i < info.numComps; i++)
            {
                uint32_t swizzleIndex = info.swizzle[i];

                // pshufb masks for each component
                Value* vConstMask = nullptr;
                switch (i)
                {
                case 0:
                    // x shuffle mask
                    vConstMask =
                        C<char>({0, -1, -1, -1, 4, -1, -1, -1, 8, -1, -1, -1, 12, -1, -1, -1,
                                 0, -1, -1, -1, 4, -1, -1, -1, 8, -1, -1, -1, 12, -1, -1, -1});
                    break;
                case 1:
                    // y shuffle mask
                    vConstMask =
                        C<char>({1, -1, -1, -1, 5, -1, -1, -1, 9, -1, -1, -1, 13, -1, -1, -1,
                                 1, -1, -1, -1, 5, -1, -1, -1, 9, -1, -1, -1, 13, -1, -1, -1});
                    break;
                case 2:
                    // z shuffle mask
                    vConstMask =
                        C<char>({2, -1, -1, -1, 6, -1, -1, -1, 10, -1, -1, -1, 14, -1, -1, -1,
                                 2, -1, -1, -1, 6, -1, -1, -1, 10, -1, -1, -1, 14, -1, -1, -1});
                    break;
                case 3:
                    // w shuffle mask
                    vConstMask =
                        C<char>({3, -1, -1, -1, 7, -1, -1, -1, 11, -1, -1, -1, 15, -1, -1, -1,
                                 3, -1, -1, -1, 7, -1, -1, -1, 11, -1, -1, -1, 15, -1, -1, -1});
                    break;
                default:
                    vConstMask = nullptr;
                    break;
                }

                vGatherOutput[swizzleIndex] =
                    BITCAST(PSHUFB(BITCAST(vGatherInput, v32x8Ty), vConstMask), vGatherTy);
                // after pshufb for x channel
                // 256i - 0    1    2    3    4    5    6    7
                //        x000 x000 x000 x000 x000 x000 x000 x000
            }
        }
    }
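
    // A -1 (high bit set) entry in a pshufb mask writes zero to that
    // destination byte; the masks above rely on this to zero-extend each
    // selected 8-bit component to 32 bits.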
    //////////////////////////////////////////////////////////////////////////
    /// @brief emulates a scatter operation.
    /// @param pDst - pointer to destination
    /// @param vSrc - vector of src data to scatter
    /// @param vOffsets - vector of byte offsets from pDst
    /// @param vMask - mask of valid lanes
    void Builder::SCATTERPS(
        Value* pDst, Value* vSrc, Value* vOffsets, Value* vMask, JIT_MEM_CLIENT usage)
    {
        AssertMemoryUsageParams(pDst, usage);

        SWR_ASSERT(vSrc->getType()->getVectorElementType()->isFloatTy());
        VSCATTERPS(pDst, vMask, vOffsets, vSrc, C(1));
        return;

        /* Scatter algorithm

           while(Index = BitScanForward(mask))
                srcElem = srcVector[Index]
                offsetElem = offsetVector[Index]
                *(pDst + offsetElem) = srcElem
                Update mask (&= ~(1<<Index))

        */

        // Reference implementation kept around for reference

        BasicBlock* pCurBB = IRB()->GetInsertBlock();
        Function*   pFunc  = pCurBB->getParent();
        Type*       pSrcTy = vSrc->getType()->getVectorElementType();

        // Store vectors on stack
        if (pScatterStackSrc == nullptr)
        {
            // Save off stack allocations and reuse per scatter. Significantly reduces stack
            // requirements for shaders with a lot of scatters.
            pScatterStackSrc     = CreateEntryAlloca(pFunc, mSimdInt64Ty);
            pScatterStackOffsets = CreateEntryAlloca(pFunc, mSimdInt32Ty);
        }

        Value* pSrcArrayPtr     = BITCAST(pScatterStackSrc, PointerType::get(vSrc->getType(), 0));
        Value* pOffsetsArrayPtr = pScatterStackOffsets;
        STORE(vSrc, pSrcArrayPtr);
        STORE(vOffsets, pOffsetsArrayPtr);

        // Cast to pointers for random access
        pSrcArrayPtr     = POINTER_CAST(pSrcArrayPtr, PointerType::get(pSrcTy, 0));
        pOffsetsArrayPtr = POINTER_CAST(pOffsetsArrayPtr, PointerType::get(mInt32Ty, 0));

        Value* pMask = VMOVMSK(vMask);

        // Setup loop basic block
        BasicBlock* pLoop = BasicBlock::Create(mpJitMgr->mContext, "Scatter_Loop", pFunc);

        // compute first set bit
        Value* pIndex = CTTZ(pMask, C(false));

        Value* pIsUndef = ICMP_EQ(pIndex, C(32));

        // Split current block or create new one if building inline
        BasicBlock* pPostLoop;
        if (pCurBB->getTerminator())
        {
            pPostLoop = pCurBB->splitBasicBlock(cast<Instruction>(pIsUndef)->getNextNode());

            // Remove unconditional jump created by splitBasicBlock
            pCurBB->getTerminator()->eraseFromParent();

            // Add terminator to end of original block
            IRB()->SetInsertPoint(pCurBB);

            // Add conditional branch
            COND_BR(pIsUndef, pPostLoop, pLoop);
        }
        else
        {
            pPostLoop = BasicBlock::Create(mpJitMgr->mContext, "PostScatter_Loop", pFunc);

            // Add conditional branch
            COND_BR(pIsUndef, pPostLoop, pLoop);
        }

        // Add loop basic block contents
        IRB()->SetInsertPoint(pLoop);
        PHINode* pIndexPhi = PHI(mInt32Ty, 2);
        PHINode* pMaskPhi  = PHI(mInt32Ty, 2);

        pIndexPhi->addIncoming(pIndex, pCurBB);
        pMaskPhi->addIncoming(pMask, pCurBB);

        // Extract elements for this index
        Value* pSrcElem    = LOADV(pSrcArrayPtr, {pIndexPhi});
        Value* pOffsetElem = LOADV(pOffsetsArrayPtr, {pIndexPhi});

        // GEP to this offset in dst
        Value* pCurDst = GEP(pDst, pOffsetElem, mInt8PtrTy);
        pCurDst        = POINTER_CAST(pCurDst, PointerType::get(pSrcTy, 0));
        STORE(pSrcElem, pCurDst);

        // Update the mask to clear the lane just scattered
        Value* pNewMask = AND(pMaskPhi, NOT(SHL(C(1), pIndexPhi)));

        // Terminator: loop again if any lanes remain
        Value* pNewIndex = CTTZ(pNewMask, C(false));

        pIsUndef = ICMP_EQ(pNewIndex, C(32));
        COND_BR(pIsUndef, pPostLoop, pLoop);

        // Update the loop-carried phi edges
        pIndexPhi->addIncoming(pNewIndex, pLoop);
        pMaskPhi->addIncoming(pNewMask, pLoop);

        // Move builder to beginning of post loop
        IRB()->SetInsertPoint(pPostLoop, pPostLoop->begin());
    }
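
    // The reference loop above follows the pseudocode given earlier: CTTZ
    // picks the lowest active lane, that lane is stored scalar-wise, its bit
    // is cleared from the mask, and the loop exits once CTTZ returns 32
    // (i.e. the mask is empty).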
} // namespace SwrJit