1 /****************************************************************************
2 * Copyright (C) 2014-2015 Intel Corporation. All Rights Reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * @brief Implementation of the blend jitter
29 ******************************************************************************/
31 #include "blend_jit.h"
33 #include "state_llvm.h"
34 #include "common/containers.hpp"
35 #include "llvm/IR/DataLayout.h"
39 // components with bit-widths <= the QUANTIZE_THRESHOLD will be quantized
40 #define QUANTIZE_THRESHOLD 2
42 //////////////////////////////////////////////////////////////////////////
43 /// Interface to Jitting a blend shader
44 //////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// BlendJit - emits the LLVM IR for a blend shader, using facilities
/// inherited from Builder (VIMMED1, FMUL, LOAD, STORE, ...).
/// NOTE(review): the embedded original line numbers in this extraction
/// have gaps (e.g. 59 -> 65), so some source lines (switch headers,
/// break statements, braces) are missing from this view; verify any
/// structural change against the upstream file.
//////////////////////////////////////////////////////////////////////////
45 struct BlendJit
: public Builder
// Constructor: forwards the JitManager to the Builder base class.
47 BlendJit(JitManager
* pJitMgr
) : Builder(pJitMgr
){};
//////////////////////////////////////////////////////////////////////////
/// @brief Emit IR that computes the 4-component blend factor selected by
///        'factor' from the constant color, src, src1 and dst colors.
/// @param factor     - which SWR_BLEND_FACTOR to generate
/// @param constColor - blend constant color, one Value* per channel
/// @param src        - PS output color
/// @param src1       - dual-source PS output color
/// @param dst        - render-target contents
/// @param result     - receives the generated factor (written as 'out')
/// The Color/Alpha template flags presumably gate which channels of
/// 'result' are written -- TODO confirm; that selection code is among the
/// lines missing from this extraction (gaps in embedded numbering).
//////////////////////////////////////////////////////////////////////////
49 template<bool Color
, bool Alpha
>
50 void GenerateBlendFactor(SWR_BLEND_FACTOR factor
, Value
* constColor
[4], Value
* src
[4], Value
* src1
[4], Value
* dst
[4], Value
* result
[4])
// BLENDFACTOR_ONE: factor is 1.0 on every channel.
57 out
[0] = out
[1] = out
[2] = out
[3] = VIMMED1(1.0f
);
59 case BLENDFACTOR_SRC_COLOR
:
// SRC_ALPHA: replicate src alpha across all four channels.
65 case BLENDFACTOR_SRC_ALPHA
:
66 out
[0] = out
[1] = out
[2] = out
[3] = src
[3];
68 case BLENDFACTOR_DST_ALPHA
:
69 out
[0] = out
[1] = out
[2] = out
[3] = dst
[3];
71 case BLENDFACTOR_DST_COLOR
:
// SRC_ALPHA_SATURATE: RGB = min(srcA, 1 - dstA), alpha = 1.
77 case BLENDFACTOR_SRC_ALPHA_SATURATE
:
78 out
[0] = out
[1] = out
[2] = VMINPS(src
[3], FSUB(VIMMED1(1.0f
), dst
[3]));
79 out
[3] = VIMMED1(1.0f
);
81 case BLENDFACTOR_CONST_COLOR
:
82 out
[0] = constColor
[0];
83 out
[1] = constColor
[1];
84 out
[2] = constColor
[2];
85 out
[3] = constColor
[3];
87 case BLENDFACTOR_CONST_ALPHA
:
88 out
[0] = out
[1] = out
[2] = out
[3] = constColor
[3];
90 case BLENDFACTOR_SRC1_COLOR
:
96 case BLENDFACTOR_SRC1_ALPHA
:
97 out
[0] = out
[1] = out
[2] = out
[3] = src1
[3];
99 case BLENDFACTOR_ZERO
:
100 out
[0] = out
[1] = out
[2] = out
[3] = VIMMED1(0.0f
);
// INV_* factors compute (1 - x) per channel.
102 case BLENDFACTOR_INV_SRC_COLOR
:
103 out
[0] = FSUB(VIMMED1(1.0f
), src
[0]);
104 out
[1] = FSUB(VIMMED1(1.0f
), src
[1]);
105 out
[2] = FSUB(VIMMED1(1.0f
), src
[2]);
106 out
[3] = FSUB(VIMMED1(1.0f
), src
[3]);
108 case BLENDFACTOR_INV_SRC_ALPHA
:
109 out
[0] = out
[1] = out
[2] = out
[3] = FSUB(VIMMED1(1.0f
), src
[3]);
111 case BLENDFACTOR_INV_DST_ALPHA
:
112 out
[0] = out
[1] = out
[2] = out
[3] = FSUB(VIMMED1(1.0f
), dst
[3]);
114 case BLENDFACTOR_INV_DST_COLOR
:
115 out
[0] = FSUB(VIMMED1(1.0f
), dst
[0]);
116 out
[1] = FSUB(VIMMED1(1.0f
), dst
[1]);
117 out
[2] = FSUB(VIMMED1(1.0f
), dst
[2]);
118 out
[3] = FSUB(VIMMED1(1.0f
), dst
[3]);
120 case BLENDFACTOR_INV_CONST_COLOR
:
121 out
[0] = FSUB(VIMMED1(1.0f
), constColor
[0]);
122 out
[1] = FSUB(VIMMED1(1.0f
), constColor
[1]);
123 out
[2] = FSUB(VIMMED1(1.0f
), constColor
[2]);
124 out
[3] = FSUB(VIMMED1(1.0f
), constColor
[3]);
126 case BLENDFACTOR_INV_CONST_ALPHA
:
127 out
[0] = out
[1] = out
[2] = out
[3] = FSUB(VIMMED1(1.0f
), constColor
[3]);
129 case BLENDFACTOR_INV_SRC1_COLOR
:
130 out
[0] = FSUB(VIMMED1(1.0f
), src1
[0]);
131 out
[1] = FSUB(VIMMED1(1.0f
), src1
[1]);
132 out
[2] = FSUB(VIMMED1(1.0f
), src1
[2]);
133 out
[3] = FSUB(VIMMED1(1.0f
), src1
[3]);
135 case BLENDFACTOR_INV_SRC1_ALPHA
:
136 out
[0] = out
[1] = out
[2] = out
[3] = FSUB(VIMMED1(1.0f
), src1
[3]);
// Unknown factor: assert in debug builds and fall back to a zero factor.
139 SWR_ASSERT(false, "Unsupported blend factor: %d", factor
);
140 out
[0] = out
[1] = out
[2] = out
[3] = VIMMED1(0.0f
);
//////////////////////////////////////////////////////////////////////////
/// @brief Emit IR clamping all 4 channels of 'src' to the valid range of
///        the render-target format's component type: [0,1] for the UNORM
///        path, [-1,1] for the SNORM path (switch header on info.type is
///        among the lines missing from this extraction).
/// @param format - render-target format
/// @param src    - color channels, clamped in place
//////////////////////////////////////////////////////////////////////////
157 void Clamp(SWR_FORMAT format
, Value
* src
[4])
159 const SWR_FORMAT_INFO
& info
= GetFormatInfo(format
);
// Uses the type of component 0 for the whole pixel.
160 SWR_TYPE type
= info
.type
[0];
// UNORM path: clamp each channel to [0.0, 1.0].
168 src
[0] = VMINPS(VMAXPS(src
[0], VIMMED1(0.0f
)), VIMMED1(1.0f
));
169 src
[1] = VMINPS(VMAXPS(src
[1], VIMMED1(0.0f
)), VIMMED1(1.0f
));
170 src
[2] = VMINPS(VMAXPS(src
[2], VIMMED1(0.0f
)), VIMMED1(1.0f
));
171 src
[3] = VMINPS(VMAXPS(src
[3], VIMMED1(0.0f
)), VIMMED1(1.0f
));
// SNORM path: clamp each channel to [-1.0, 1.0].
175 src
[0] = VMINPS(VMAXPS(src
[0], VIMMED1(-1.0f
)), VIMMED1(1.0f
));
176 src
[1] = VMINPS(VMAXPS(src
[1], VIMMED1(-1.0f
)), VIMMED1(1.0f
));
177 src
[2] = VMINPS(VMAXPS(src
[2], VIMMED1(-1.0f
)), VIMMED1(1.0f
));
178 src
[3] = VMINPS(VMAXPS(src
[3], VIMMED1(-1.0f
)), VIMMED1(1.0f
));
// NOTE(review): "Unsupport" is a typo for "Unsupported" in this runtime
// assert message; fixing it requires a code change, flagged only here.
181 default: SWR_ASSERT(false, "Unsupport format type: %d", type
);
//////////////////////////////////////////////////////////////////////////
/// @brief Overwrite channels of 'src' that the format does not actually
///        store with that format's default values, so later blending sees
///        well-defined contents for missing components.
/// @param format - render-target format
/// @param src    - color channels, patched in place
//////////////////////////////////////////////////////////////////////////
185 void ApplyDefaults(SWR_FORMAT format
, Value
* src
[4])
187 const SWR_FORMAT_INFO
& info
= GetFormatInfo(format
);
// Mark (post-swizzle) channels that the format really provides.
189 bool valid
[] = { false, false, false, false };
190 for (uint32_t c
= 0; c
< info
.numComps
; ++c
)
192 valid
[info
.swizzle
[c
]] = true;
// For every channel not provided, splat the format's default value.
// defaults[] holds a bit pattern, hence the int splat + bitcast to float.
195 for (uint32_t c
= 0; c
< 4; ++c
)
199 src
[c
] = BITCAST(VIMMED1((int)info
.defaults
[c
]), mSimdFP32Ty
);
//////////////////////////////////////////////////////////////////////////
/// @brief Force default values into channels whose component type is
///        SWR_TYPE_UNUSED (the 'X' components of formats like B8G8R8X8),
///        even though the format nominally stores them.
/// @param format - render-target format
/// @param src    - color channels, patched in place
//////////////////////////////////////////////////////////////////////////
204 void ApplyUnusedDefaults(SWR_FORMAT format
, Value
* src
[4])
206 const SWR_FORMAT_INFO
& info
= GetFormatInfo(format
);
208 for (uint32_t c
= 0; c
< info
.numComps
; ++c
)
210 if (info
.type
[c
] == SWR_TYPE_UNUSED
)
// Splat the default bit pattern and reinterpret as float, indexing
// through the swizzle to hit the right output channel.
212 src
[info
.swizzle
[c
]] = BITCAST(VIMMED1((int)info
.defaults
[info
.swizzle
[c
]]), mSimdFP32Ty
);
//////////////////////////////////////////////////////////////////////////
/// @brief Quantize channels whose bit width is <= QUANTIZE_THRESHOLD so
///        blending matches what the low-precision format can represent:
///        scale to the N-bit integer range, round, then rescale to float.
/// @param format - render-target format
/// @param src    - color channels, quantized in place
//////////////////////////////////////////////////////////////////////////
217 void Quantize(SWR_FORMAT format
, Value
* src
[4])
219 const SWR_FORMAT_INFO
& info
= GetFormatInfo(format
);
220 for (uint32_t c
= 0; c
< info
.numComps
; ++c
)
222 if (info
.bpc
[c
] <= QUANTIZE_THRESHOLD
)
224 uint32_t swizComp
= info
.swizzle
[c
];
// Max representable value for a bpc[c]-bit component, e.g. 3 for 2 bits.
225 float factor
= (float)((1 << info
.bpc
[c
]) - 1);
226 switch (info
.type
[c
])
// Scale up, add 0.5 then truncate toward zero (= round to nearest),
// and scale back down to [0,1].
229 src
[swizComp
] = FADD(FMUL(src
[swizComp
], VIMMED1(factor
)), VIMMED1(0.5f
));
230 src
[swizComp
] = VROUND(src
[swizComp
], C(_MM_FROUND_TO_ZERO
));
231 src
[swizComp
] = FMUL(src
[swizComp
], VIMMED1(1.0f
/factor
));
233 default: SWR_ASSERT(false, "Unsupported format type: %d", info
.type
[c
]);
//////////////////////////////////////////////////////////////////////////
/// @brief Emit IR combining pre-multiplied src and dst terms according to
///        'blendOp': ADD, SUBTRACT, REVSUBTRACT, MIN or MAX. MIN/MAX use
///        the raw colors (factors ignored), matching GL/D3D blend rules.
/// @param blendOp   - which SWR_BLEND_OP to generate
/// @param src       - PS output color
/// @param srcFactor - source blend factor (from GenerateBlendFactor)
/// @param dst       - render-target contents
/// @param dstFactor - destination blend factor
/// @param result    - receives the blended color (written as 'out')
/// Color/Alpha template flags presumably select which channels are
/// written -- TODO confirm; that code is not visible in this extraction.
//////////////////////////////////////////////////////////////////////////
239 template<bool Color
, bool Alpha
>
240 void BlendFunc(SWR_BLEND_OP blendOp
, Value
* src
[4], Value
* srcFactor
[4], Value
* dst
[4], Value
* dstFactor
[4], Value
* result
[4])
// Pre-multiply both sides by their factors once, shared by add/sub ops.
245 for (uint32_t i
= 0; i
< 4; ++i
)
247 srcBlend
[i
] = FMUL(src
[i
], srcFactor
[i
]);
248 dstBlend
[i
] = FMUL(dst
[i
], dstFactor
[i
]);
// BLENDOP_ADD: out = src*srcFactor + dst*dstFactor
254 out
[0] = FADD(srcBlend
[0], dstBlend
[0]);
255 out
[1] = FADD(srcBlend
[1], dstBlend
[1]);
256 out
[2] = FADD(srcBlend
[2], dstBlend
[2]);
257 out
[3] = FADD(srcBlend
[3], dstBlend
[3]);
260 case BLENDOP_SUBTRACT
:
261 out
[0] = FSUB(srcBlend
[0], dstBlend
[0]);
262 out
[1] = FSUB(srcBlend
[1], dstBlend
[1]);
263 out
[2] = FSUB(srcBlend
[2], dstBlend
[2]);
264 out
[3] = FSUB(srcBlend
[3], dstBlend
[3]);
267 case BLENDOP_REVSUBTRACT
:
268 out
[0] = FSUB(dstBlend
[0], srcBlend
[0]);
269 out
[1] = FSUB(dstBlend
[1], srcBlend
[1]);
270 out
[2] = FSUB(dstBlend
[2], srcBlend
[2]);
271 out
[3] = FSUB(dstBlend
[3], srcBlend
[3]);
// MIN/MAX operate on the unmultiplied colors (factors are ignored).
275 out
[0] = VMINPS(src
[0], dst
[0]);
276 out
[1] = VMINPS(src
[1], dst
[1]);
277 out
[2] = VMINPS(src
[2], dst
[2]);
278 out
[3] = VMINPS(src
[3], dst
[3]);
282 out
[0] = VMAXPS(src
[0], dst
[0]);
283 out
[1] = VMAXPS(src
[1], dst
[1]);
284 out
[2] = VMAXPS(src
[2], dst
[2]);
285 out
[3] = VMAXPS(src
[3], dst
[3]);
// Unknown op: assert in debug builds and emit zero.
289 SWR_ASSERT(false, "Unsupported blend operation: %d", blendOp
);
290 out
[0] = out
[1] = out
[2] = out
[3] = VIMMED1(0.0f
);
//////////////////////////////////////////////////////////////////////////
/// @brief Emit IR implementing the GL-style logic op: per-channel bitwise
///        combination of the PS output (s) and RT contents (d) on integer
///        SIMD vectors. Inversion is done as XOR with all-ones since
///        there is no direct NOT in this IR builder.
/// @param logicOp - which SWR_LOGIC_OP to generate
/// @param src     - PS output channels (already bitcast to int)
/// @param dst     - RT channels (already bitcast to int)
/// @param result  - receives the combined channels
//////////////////////////////////////////////////////////////////////////
307 void LogicOpFunc(SWR_LOGIC_OP logicOp
, Value
* src
[4], Value
* dst
[4], Value
* result
[4])
309 // Op: (s == PS output, d = RT contents)
// LOGICOP_CLEAR: result = 0
313 result
[0] = VIMMED1(0);
314 result
[1] = VIMMED1(0);
315 result
[2] = VIMMED1(0);
316 result
[3] = VIMMED1(0);
// NOR: ~(s | d)
321 result
[0] = XOR(OR(src
[0], dst
[0]), VIMMED1(0xFFFFFFFF));
322 result
[1] = XOR(OR(src
[1], dst
[1]), VIMMED1(0xFFFFFFFF));
323 result
[2] = XOR(OR(src
[2], dst
[2]), VIMMED1(0xFFFFFFFF));
324 result
[3] = XOR(OR(src
[3], dst
[3]), VIMMED1(0xFFFFFFFF));
// AND_INVERTED: ~s & d
327 case LOGICOP_AND_INVERTED
:
329 // todo: use avx andnot instr when I can find the intrinsic to call
330 result
[0] = AND(XOR(src
[0], VIMMED1(0xFFFFFFFF)), dst
[0]);
331 result
[1] = AND(XOR(src
[1], VIMMED1(0xFFFFFFFF)), dst
[1]);
332 result
[2] = AND(XOR(src
[2], VIMMED1(0xFFFFFFFF)), dst
[2]);
333 result
[3] = AND(XOR(src
[3], VIMMED1(0xFFFFFFFF)), dst
[3]);
// COPY_INVERTED: ~s
336 case LOGICOP_COPY_INVERTED
:
338 result
[0] = XOR(src
[0], VIMMED1(0xFFFFFFFF));
339 result
[1] = XOR(src
[1], VIMMED1(0xFFFFFFFF));
340 result
[2] = XOR(src
[2], VIMMED1(0xFFFFFFFF));
341 result
[3] = XOR(src
[3], VIMMED1(0xFFFFFFFF));
// AND_REVERSE: s & ~d
344 case LOGICOP_AND_REVERSE
:
346 // todo: use avx andnot instr when I can find the intrinsic to call
347 result
[0] = AND(XOR(dst
[0], VIMMED1(0xFFFFFFFF)), src
[0]);
348 result
[1] = AND(XOR(dst
[1], VIMMED1(0xFFFFFFFF)), src
[1]);
349 result
[2] = AND(XOR(dst
[2], VIMMED1(0xFFFFFFFF)), src
[2]);
350 result
[3] = AND(XOR(dst
[3], VIMMED1(0xFFFFFFFF)), src
[3]);
// INVERT: ~d
355 result
[0] = XOR(dst
[0], VIMMED1(0xFFFFFFFF));
356 result
[1] = XOR(dst
[1], VIMMED1(0xFFFFFFFF));
357 result
[2] = XOR(dst
[2], VIMMED1(0xFFFFFFFF));
358 result
[3] = XOR(dst
[3], VIMMED1(0xFFFFFFFF));
// XOR: s ^ d
363 result
[0] = XOR(src
[0], dst
[0]);
364 result
[1] = XOR(src
[1], dst
[1]);
365 result
[2] = XOR(src
[2], dst
[2]);
366 result
[3] = XOR(src
[3], dst
[3]);
// NAND: ~(s & d)
371 result
[0] = XOR(AND(src
[0], dst
[0]), VIMMED1(0xFFFFFFFF));
372 result
[1] = XOR(AND(src
[1], dst
[1]), VIMMED1(0xFFFFFFFF));
373 result
[2] = XOR(AND(src
[2], dst
[2]), VIMMED1(0xFFFFFFFF));
374 result
[3] = XOR(AND(src
[3], dst
[3]), VIMMED1(0xFFFFFFFF));
// AND: s & d
379 result
[0] = AND(src
[0], dst
[0]);
380 result
[1] = AND(src
[1], dst
[1]);
381 result
[2] = AND(src
[2], dst
[2]);
382 result
[3] = AND(src
[3], dst
[3]);
// EQUIV (XNOR): ~(s ^ d)
387 result
[0] = XOR(XOR(src
[0], dst
[0]), VIMMED1(0xFFFFFFFF));
388 result
[1] = XOR(XOR(src
[1], dst
[1]), VIMMED1(0xFFFFFFFF));
389 result
[2] = XOR(XOR(src
[2], dst
[2]), VIMMED1(0xFFFFFFFF));
390 result
[3] = XOR(XOR(src
[3], dst
[3]), VIMMED1(0xFFFFFFFF));
// OR_INVERTED: ~s | d
400 case LOGICOP_OR_INVERTED
:
402 result
[0] = OR(XOR(src
[0], VIMMED1(0xFFFFFFFF)), dst
[0]);
403 result
[1] = OR(XOR(src
[1], VIMMED1(0xFFFFFFFF)), dst
[1]);
404 result
[2] = OR(XOR(src
[2], VIMMED1(0xFFFFFFFF)), dst
[2]);
405 result
[3] = OR(XOR(src
[3], VIMMED1(0xFFFFFFFF)), dst
[3]);
// OR_REVERSE: s | ~d
415 case LOGICOP_OR_REVERSE
:
417 result
[0] = OR(XOR(dst
[0], VIMMED1(0xFFFFFFFF)), src
[0]);
418 result
[1] = OR(XOR(dst
[1], VIMMED1(0xFFFFFFFF)), src
[1]);
419 result
[2] = OR(XOR(dst
[2], VIMMED1(0xFFFFFFFF)), src
[2]);
420 result
[3] = OR(XOR(dst
[3], VIMMED1(0xFFFFFFFF)), src
[3]);
// OR: s | d
425 result
[0] = OR(src
[0], dst
[0]);
426 result
[1] = OR(src
[1], dst
[1]);
427 result
[2] = OR(src
[2], dst
[2]);
428 result
[3] = OR(src
[3], dst
[3]);
// SET: result = all ones
432 result
[0] = VIMMED1(0xFFFFFFFF);
433 result
[1] = VIMMED1(0xFFFFFFFF);
434 result
[2] = VIMMED1(0xFFFFFFFF);
435 result
[3] = VIMMED1(0xFFFFFFFF);
// Unknown op: assert in debug builds and emit zero.
439 SWR_ASSERT(false, "Unsupported logic operation: %d", logicOp
);
440 result
[0] = result
[1] = result
[2] = result
[3] = VIMMED1(0.0f
);
//////////////////////////////////////////////////////////////////////////
/// @brief Emit IR for the fixed-function alpha test: compares the PS
///        alpha against the reference in the blend state and ANDs the
///        comparison result into the coverage mask at *ppMask.
/// @param state       - compile-time blend state (test format/function)
/// @param pBlendState - runtime SWR_BLEND_STATE pointer (IR value)
/// @param pAlpha      - PS output alpha (simd float)
/// @param ppMask      - pointer to the coverage mask, updated in place
//////////////////////////////////////////////////////////////////////////
445 void AlphaTest(const BLEND_COMPILE_STATE
& state
, Value
* pBlendState
, Value
* pAlpha
, Value
* ppMask
)
447 // load uint32_t reference
448 Value
* pRef
= VBROADCAST(LOAD(pBlendState
, { 0, SWR_BLEND_STATE_alphaTestReference
}));
450 Value
* pTest
= nullptr;
// UNORM8 path: quantize alpha to 8 bits and compare as unsigned ints.
451 if (state
.alphaTestFormat
== ALPHA_TEST_UNORM8
)
453 // convert float alpha to unorm8
454 Value
* pAlphaU8
= FMUL(pAlpha
, VIMMED1(256.0f
));
455 pAlphaU8
= FP_TO_UI(pAlphaU8
, mSimdInt32Ty
);
// Unsigned integer comparisons against the 8-bit reference.
458 switch (state
.alphaTestFunction
)
460 case ZFUNC_ALWAYS
: pTest
= VIMMED1(true); break;
461 case ZFUNC_NEVER
: pTest
= VIMMED1(false); break;
462 case ZFUNC_LT
: pTest
= ICMP_ULT(pAlphaU8
, pRef
); break;
463 case ZFUNC_EQ
: pTest
= ICMP_EQ(pAlphaU8
, pRef
); break;
464 case ZFUNC_LE
: pTest
= ICMP_ULE(pAlphaU8
, pRef
); break;
465 case ZFUNC_GT
: pTest
= ICMP_UGT(pAlphaU8
, pRef
); break;
466 case ZFUNC_NE
: pTest
= ICMP_NE(pAlphaU8
, pRef
); break;
467 case ZFUNC_GE
: pTest
= ICMP_UGE(pAlphaU8
, pRef
); break;
469 SWR_ASSERT(false, "Invalid alpha test function");
// Float path: reinterpret the loaded reference bits as float and use
// ordered float comparisons.
476 pRef
= BITCAST(pRef
, mSimdFP32Ty
);
479 switch (state
.alphaTestFunction
)
481 case ZFUNC_ALWAYS
: pTest
= VIMMED1(true); break;
482 case ZFUNC_NEVER
: pTest
= VIMMED1(false); break;
483 case ZFUNC_LT
: pTest
= FCMP_OLT(pAlpha
, pRef
); break;
484 case ZFUNC_EQ
: pTest
= FCMP_OEQ(pAlpha
, pRef
); break;
485 case ZFUNC_LE
: pTest
= FCMP_OLE(pAlpha
, pRef
); break;
486 case ZFUNC_GT
: pTest
= FCMP_OGT(pAlpha
, pRef
); break;
487 case ZFUNC_NE
: pTest
= FCMP_ONE(pAlpha
, pRef
); break;
488 case ZFUNC_GE
: pTest
= FCMP_OGE(pAlpha
, pRef
); break;
490 SWR_ASSERT(false, "Invalid alpha test function");
// Fold the test result into the existing coverage mask.
496 Value
* pMask
= LOAD(ppMask
);
498 // convert to int1 mask
501 // and with alpha test result
502 pMask
= AND(pMask
, pTest
);
504 // convert back to vector mask
505 pMask
= VMASK(pMask
);
508 STORE(pMask
, ppMask
);
//////////////////////////////////////////////////////////////////////////
/// @brief Build the complete blend-shader LLVM function for 'state':
///        declares the function, loads src/src1/dst/constant color,
///        applies alpha-to-coverage, alpha test, blending or logic op as
///        enabled, applies oMask/sampleMask, stores the result, and runs
///        a fixed optimization pass pipeline over the new function.
/// @param state - compile-time blend state describing what to emit
/// @return the generated llvm::Function
//////////////////////////////////////////////////////////////////////////
511 Function
* Create(const BLEND_COMPILE_STATE
& state
)
513 static std::size_t jitNum
= 0;
// ios_base::ate so later << appends after the "BlendShader" prefix.
515 std::stringstream
fnName("BlendShader", std::ios_base::in
| std::ios_base::out
| std::ios_base::ate
);
518 // blend function signature
519 //typedef void(*PFN_BLEND_JIT_FUNC)(const SWR_BLEND_STATE*, simdvector&, simdvector&, uint32_t, BYTE*, simdvector&, simdscalari*, simdscalari*);
521 std::vector
<Type
*> args
{
522 PointerType::get(Gen_SWR_BLEND_STATE(JM()), 0), // SWR_BLEND_STATE*
523 PointerType::get(mSimdFP32Ty
, 0), // simdvector& src
524 PointerType::get(mSimdFP32Ty
, 0), // simdvector& src1
525 Type::getInt32Ty(JM()->mContext
), // sampleNum
526 PointerType::get(mSimdFP32Ty
, 0), // uint8_t* pDst
527 PointerType::get(mSimdFP32Ty
, 0), // simdvector& result
528 PointerType::get(mSimdInt32Ty
, 0), // simdscalari* oMask
529 PointerType::get(mSimdInt32Ty
, 0), // simdscalari* pMask
// Create the externally-visible void function and its entry block.
532 FunctionType
* fTy
= FunctionType::get(IRB()->getVoidTy(), args
, false);
533 Function
* blendFunc
= Function::Create(fTy
, GlobalValue::ExternalLinkage
, fnName
.str(), JM()->mpCurrentModule
);
535 BasicBlock
* entry
= BasicBlock::Create(JM()->mContext
, "entry", blendFunc
);
537 IRB()->SetInsertPoint(entry
);
// Name the incoming arguments, in signature order.
540 auto argitr
= blendFunc
->getArgumentList().begin();
541 Value
* pBlendState
= &*argitr
++;
542 pBlendState
->setName("pBlendState");
543 Value
* pSrc
= &*argitr
++;
544 pSrc
->setName("src");
545 Value
* pSrc1
= &*argitr
++;
546 pSrc1
->setName("src1");
547 Value
* sampleNum
= &*argitr
++;
548 sampleNum
->setName("sampleNum");
549 Value
* pDst
= &*argitr
++;
550 pDst
->setName("pDst");
551 Value
* pResult
= &*argitr
++;
552 pResult
->setName("result");
553 Value
* ppoMask
= &*argitr
++;
554 ppoMask
->setName("ppoMask");
555 Value
* ppMask
= &*argitr
++;
556 ppMask
->setName("pMask");
// The code below assumes the hot tile is 32-bit float RGBA.
558 static_assert(KNOB_COLOR_HOT_TILE_FORMAT
== R32G32B32A32_FLOAT
, "Unsupported hot tile format");
560 Value
* constantColor
[4];
// Load dst, constant color, src and src1, one channel per iteration.
564 for (uint32_t i
= 0; i
< 4; ++i
)
567 dst
[i
] = LOAD(pDst
, { i
});
569 // load constant color
570 constantColor
[i
] = VBROADCAST(LOAD(pBlendState
, { 0, SWR_BLEND_STATE_constantColor
, i
}));
573 src
[i
] = LOAD(pSrc
, { i
});
576 src1
[i
] = LOAD(pSrc1
, { i
});
// Start with full coverage; alpha-to-coverage derives a mask from
// alpha scaled by the sample count.
578 Value
* currentMask
= VIMMED1(-1);
579 if(state
.desc
.alphaToCoverageEnable
)
581 currentMask
= FP_TO_SI(FMUL(src
[3], VBROADCAST(C((float)state
.desc
.numSamples
))), mSimdInt32Ty
);
// Alpha test updates *ppMask in place.
585 if (state
.desc
.alphaTestEnable
)
587 AlphaTest(state
, pBlendState
, src
[3], ppMask
);
// Fixed-function blending path.
591 if (state
.blendState
.blendEnable
)
594 Clamp(state
.format
, src
);
595 Clamp(state
.format
, src1
);
596 Clamp(state
.format
, dst
);
597 Clamp(state
.format
, constantColor
);
599 // apply defaults to hottile contents to take into account missing components
600 ApplyDefaults(state
.format
, dst
);
602 // Force defaults for unused 'X' components
603 ApplyUnusedDefaults(state
.format
, dst
);
605 // Quantize low precision components
606 Quantize(state
.format
, dst
);
608 // special case clamping for R11G11B10_float which has no sign bit
609 if (state
.format
== R11G11B10_FLOAT
)
611 dst
[0] = VMAXPS(dst
[0], VIMMED1(0.0f
));
612 dst
[1] = VMAXPS(dst
[1], VIMMED1(0.0f
));
613 dst
[2] = VMAXPS(dst
[2], VIMMED1(0.0f
));
614 dst
[3] = VMAXPS(dst
[3], VIMMED1(0.0f
));
// Independent alpha blend: separate factor/op for color vs alpha,
// selected via the <Color, Alpha> template flags.
619 if (state
.desc
.independentAlphaBlendEnable
)
621 GenerateBlendFactor
<true, false>(state
.blendState
.sourceBlendFactor
, constantColor
, src
, src1
, dst
, srcFactor
);
622 GenerateBlendFactor
<false, true>(state
.blendState
.sourceAlphaBlendFactor
, constantColor
, src
, src1
, dst
, srcFactor
);
624 GenerateBlendFactor
<true, false>(state
.blendState
.destBlendFactor
, constantColor
, src
, src1
, dst
, dstFactor
);
625 GenerateBlendFactor
<false, true>(state
.blendState
.destAlphaBlendFactor
, constantColor
, src
, src1
, dst
, dstFactor
);
627 BlendFunc
<true, false>(state
.blendState
.colorBlendFunc
, src
, srcFactor
, dst
, dstFactor
, result
);
628 BlendFunc
<false, true>(state
.blendState
.alphaBlendFunc
, src
, srcFactor
, dst
, dstFactor
, result
);
// Unified path: color factors/op applied to all four channels.
632 GenerateBlendFactor
<true, true>(state
.blendState
.sourceBlendFactor
, constantColor
, src
, src1
, dst
, srcFactor
);
633 GenerateBlendFactor
<true, true>(state
.blendState
.destBlendFactor
, constantColor
, src
, src1
, dst
, dstFactor
);
635 BlendFunc
<true, true>(state
.blendState
.colorBlendFunc
, src
, srcFactor
, dst
, dstFactor
, result
);
// Write the blended color out.
639 for (uint32_t i
= 0; i
< 4; ++i
)
641 STORE(result
[i
], pResult
, { i
});
// Logic-op path: only defined for UINT render targets.
645 if(state
.blendState
.logicOpEnable
)
647 const SWR_FORMAT_INFO
& info
= GetFormatInfo(state
.format
);
648 SWR_ASSERT(info
.type
[0] == SWR_TYPE_UINT
);
// Build a per-channel mask of the bits the RT format actually stores
// (keyed on bits-per-component), used to clear excess PS output bits.
650 for(uint32_t i
= 0; i
< 4; i
++)
654 case 0: vMask
[i
] = VIMMED1(0x00000000); break;
655 case 2: vMask
[i
] = VIMMED1(0x00000003); break;
656 case 5: vMask
[i
] = VIMMED1(0x0000001F); break;
657 case 6: vMask
[i
] = VIMMED1(0x0000003F); break;
658 case 8: vMask
[i
] = VIMMED1(0x000000FF); break;
659 case 10: vMask
[i
] = VIMMED1(0x000003FF); break;
660 case 11: vMask
[i
] = VIMMED1(0x000007FF); break;
661 case 16: vMask
[i
] = VIMMED1(0x0000FFFF); break;
662 case 24: vMask
[i
] = VIMMED1(0x00FFFFFF); break;
663 case 32: vMask
[i
] = VIMMED1(0xFFFFFFFF); break;
665 vMask
[i
] = VIMMED1(0x0);
666 SWR_ASSERT(0, "Unsupported bpc for logic op\n");
// Logic ops work on integer bit patterns, so bitcast float lanes.
669 src
[i
] = BITCAST(src
[i
], mSimdInt32Ty
);//, vMask[i]);
670 dst
[i
] = BITCAST(dst
[i
], mSimdInt32Ty
);
673 LogicOpFunc(state
.blendState
.logicOpFunc
, src
, dst
, result
);
676 for(uint32_t i
= 0; i
< 4; ++i
)
678 // clear upper bits from PS output not in RT format after doing logic op
679 result
[i
] = AND(result
[i
], vMask
[i
]);
681 STORE(BITCAST(result
[i
], mSimdFP32Ty
), pResult
, {i
});
// oMask: restrict coverage to the PS-written output mask for this sample.
685 if(state
.desc
.oMaskEnable
)
687 assert(!(state
.desc
.alphaToCoverageEnable
));
689 Value
* oMask
= LOAD(ppoMask
);
690 Value
* sampleMasked
= VBROADCAST(SHL(C(1), sampleNum
));
691 oMask
= AND(oMask
, sampleMasked
);
692 currentMask
= AND(oMask
, currentMask
);
// sampleMask: restrict coverage to the state's per-sample mask bit.
695 if(state
.desc
.sampleMaskEnable
)
697 Value
* sampleMask
= LOAD(pBlendState
, { 0, SWR_BLEND_STATE_sampleMask
});
698 Value
* sampleMasked
= SHL(C(1), sampleNum
);
699 sampleMask
= AND(sampleMask
, sampleMasked
);
700 sampleMask
= VBROADCAST(ICMP_SGT(sampleMask
, C(0)));
701 sampleMask
= S_EXT(sampleMask
, mSimdInt32Ty
);
702 currentMask
= AND(sampleMask
, currentMask
);
// If any mask source was active, fold currentMask into *ppMask.
705 if(state
.desc
.sampleMaskEnable
|| state
.desc
.alphaToCoverageEnable
||
706 state
.desc
.oMaskEnable
)
709 Value
* pMask
= LOAD(ppMask
);
710 currentMask
= S_EXT(ICMP_SGT(currentMask
, VBROADCAST(C(0))), mSimdInt32Ty
);
711 Value
* outputMask
= AND(pMask
, currentMask
);
713 STORE(outputMask
, GEP(ppMask
, C(0)));
// Dump the unoptimized IR (no suffix), then run the pass pipeline.
718 JitManager::DumpToFile(blendFunc
, "");
720 FunctionPassManager
passes(JM()->mpCurrentModule
);
721 passes
.add(createBreakCriticalEdgesPass());
722 passes
.add(createCFGSimplificationPass());
723 passes
.add(createEarlyCSEPass());
724 passes
.add(createPromoteMemoryToRegisterPass());
725 passes
.add(createCFGSimplificationPass());
726 passes
.add(createEarlyCSEPass());
727 passes
.add(createInstructionCombiningPass());
728 passes
.add(createInstructionSimplifierPass());
729 passes
.add(createConstantPropagationPass());
730 passes
.add(createSCCPPass());
731 passes
.add(createAggressiveDCEPass());
733 passes
.run(*blendFunc
);
735 JitManager::DumpToFile(blendFunc
, "optimized");
741 //////////////////////////////////////////////////////////////////////////
742 /// @brief Retrieves the native-code entry point for a previously built
///        blend shader (the original Doxygen said "fetch shader" /
///        PFN_FETCH_FUNC -- a copy-paste error; this is the blend path).
743 /// @param hJitMgr - JitManager handle
744 /// @param func - LLVM function IR
745 /// @return PFN_BLEND_JIT_FUNC - pointer to compiled blend code
746 PFN_BLEND_JIT_FUNC
JitBlendFunc(HANDLE hJitMgr
, const HANDLE hFunc
)
748 const llvm::Function
*func
= (const llvm::Function
*)hFunc
;
749 JitManager
* pJitMgr
= reinterpret_cast<JitManager
*>(hJitMgr
);
750 PFN_BLEND_JIT_FUNC pfnBlend
;
// Look up the JITted symbol by the IR function's name.
751 pfnBlend
= (PFN_BLEND_JIT_FUNC
)(pJitMgr
->mpExec
->getFunctionAddress(func
->getName().str()));
752 // MCJIT finalizes modules the first time you JIT code from them. After finalized, you cannot add new IR to the module
753 pJitMgr
->mIsModuleFinalized
= true;
758 //////////////////////////////////////////////////////////////////////////
759 /// @brief JIT compiles blend shader
760 /// @param hJitMgr - JitManager handle
761 /// @param state - blend state to build function from
/// @return PFN_BLEND_JIT_FUNC - pointer to the compiled blend function
762 extern "C" PFN_BLEND_JIT_FUNC JITCALL
JitCompileBlend(HANDLE hJitMgr
, const BLEND_COMPILE_STATE
& state
)
764 JitManager
* pJitMgr
= reinterpret_cast<JitManager
*>(hJitMgr
);
// Each blend shader gets a fresh module (MCJIT finalization is per-module).
766 pJitMgr
->SetupNewModule();
// Build the IR, then resolve it to native code via JitBlendFunc.
768 BlendJit
theJit(pJitMgr
);
769 HANDLE hFunc
= theJit
.Create(state
);
771 return JitBlendFunc(hJitMgr
, hFunc
);