*/
lp_native_vector_width = 128;
}
-
+
lp_native_vector_width = debug_get_num_option("LP_NATIVE_VECTOR_WIDTH",
lp_native_vector_width);
}
}
+ assert(gallivm != NULL);
return gallivm;
}
BUCKET_DESC& desc = mBuckets[bucket.id];
// construct hierarchy visualization
+ std::string str = arrows[level];
+ str += desc.name;
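+ // compose the full label as a std::string first, then copy it into the fixed-size buffer with a bounded strcpy_s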
char hier[80];
- strcpy(hier, arrows[level]);
- strcat(hier, desc.name.c_str());
+ strcpy_s(hier, sizeof(hier), str.c_str());
// print out
fprintf(f,
{
{
FILE* f = fopen(filename.c_str(), "w");
+ assert(f);
mThreadMutex.lock();
for (const BUCKET_THREAD& thread : mThreads)
#define SWR_ASSERT(e, ...) _SWR_ASSERT(true, e, ##__VA_ARGS__)
#define SWR_ASSUME_ASSERT(e, ...) SWR_ASSERT(e, ##__VA_ARGS__)
#define SWR_TRACE(_fmtstr, ...) _SWR_TRACE(_fmtstr, ##__VA_ARGS__)
-
-#if defined(assert)
-#undef assert
-#endif
-#define assert(exp) SWR_ASSERT(exp)
-
#endif // SWR_ENABLE_ASSERTS
#if SWR_ENABLE_REL_ASSERTS
{
SWR_CONTEXT* pContext = GetContext(hContext);
auto pSrc = GetDrawState(pContext);
- SWR_ASSERT(pOutputStateBlock && memSize >= sizeof(*pSrc));
+ assert(pOutputStateBlock && memSize >= sizeof(*pSrc));
memcpy(pOutputStateBlock, pSrc, sizeof(*pSrc));
}
{
SWR_CONTEXT* pContext = GetContext(hContext);
auto pDst = GetDrawState(pContext);
- SWR_ASSERT(pStateBlock && memSize >= sizeof(*pDst));
+ assert(pStateBlock && memSize >= sizeof(*pDst));
memcpy(pDst, pStateBlock, sizeof(*pDst));
}
if (pBlock)
{
- SWR_ASSUME_ASSERT(pPrevBlock && pPrevBlock->pNext == pBlock);
+ assert(pPrevBlock && pPrevBlock->pNext == pBlock);
pPrevBlock->pNext = pBlock->pNext;
pBlock->pNext = nullptr;
default:
SWR_INVALID("Invalid Tessellation Domain: %d", Domain);
+ assert(false && "Invalid Tessellation Domain");
}
NumDomainPoints = (uint32_t)SUPER::GetPointCount();
CPUNumaNodes nodes;
uint32_t numThreadsPerProcGroup = 0;
CalculateProcessorTopology(nodes, numThreadsPerProcGroup);
+ assert(numThreadsPerProcGroup > 0);
// Assumption: for asymmetric topologies, multi-threaded cores will appear
// in the list before single-threaded cores. This appears to be true for
pContext->NumWorkerThreads = pPool->numThreads;
pPool->pThreadData = new (std::nothrow) THREAD_DATA[pPool->numThreads];
- SWR_ASSERT(pPool->pThreadData);
+ assert(pPool->pThreadData);
memset(pPool->pThreadData, 0, sizeof(THREAD_DATA) * pPool->numThreads);
pPool->numaMask = 0;
pContext->workerPrivateState.pfnInitWorkerData = nullptr;
pContext->workerPrivateState.pfnFinishWorkerData = nullptr;
}
-
+
// initialize contents of SWR_WORKER_DATA
size_t perWorkerSize =
AlignUpPow2(pContext->workerPrivateState.perWorkerPrivateStateSize, 64);
}
pPool->pThreads = new (std::nothrow) THREAD_PTR[pPool->numThreads];
- SWR_ASSERT(pPool->pThreads);
+ assert(pPool->pThreads);
if (pContext->threadInfo.MAX_WORKER_THREADS)
{
if (numRemovedThreads)
{
--numRemovedThreads;
- SWR_REL_ASSERT(numReservedThreads);
+ assert(numReservedThreads);
--numReservedThreads;
pPool->pApiThreadData[numReservedThreads].workerId = 0xFFFFFFFFU;
pPool->pApiThreadData[numReservedThreads].procGroupId = core.procGroup;
break;
}
+ assert(vConstMask && "Invalid info.numComps value");
vGatherOutput[swizzleIndex] =
BITCAST(PSHUFB(BITCAST(vGatherInput, v32x8Ty), vConstMask), vGatherTy);
// after pshufb for x channel
// if (vSrc->getType() != mSimdFP32Ty)
// {
// vSrc = BITCAST(vSrc, mSimdFP32Ty);
-// }
+// }
SWR_ASSERT(vSrc->getType()->getVectorElementType()->isFloatTy());
VSCATTERPS(pDst, vMask, vOffsets, vSrc, C(1));
return;
else
{
Constant* cB = dyn_cast<Constant>(b);
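+ // dyn_cast<> yields nullptr if b is not actually a Constant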
+ assert(cB != nullptr);
// number of 8 bit elements in b
uint32_t numElms = cast<VectorType>(cB->getType())->getNumElements();
// output vector
: vIndices = GetSimdValid32bitIndices(indices, pLastIndex);
break; // incoming type is already 32bit int
default:
- SWR_INVALID("Unsupported index type");
vIndices = nullptr;
+ assert(false && "Unsupported index type");
break;
}
conversionFactor = VIMMED1((float)(1.0));
break;
case CONVERT_USCALED:
- SWR_INVALID("Type should not be sign extended!");
+ assert(false && "Type should not be sign extended!");
conversionFactor = nullptr;
break;
default:
- SWR_ASSERT(conversionType == CONVERT_NONE);
+ assert(conversionType == CONVERT_NONE);
conversionFactor = nullptr;
break;
}
conversionFactor = VIMMED1((float)(1.0));
break;
case CONVERT_SSCALED:
- SWR_INVALID("Type should not be zero extended!");
+ assert(false && "Type should not be zero extended!");
conversionFactor = nullptr;
break;
default:
- SWR_ASSERT(conversionType == CONVERT_NONE);
+ assert(conversionType == CONVERT_NONE);
conversionFactor = nullptr;
break;
}
3, -1, -1, -1, 7, -1, -1, -1, 11, -1, -1, -1, 15, -1, -1, -1});
break;
default:
+ assert(false && "Invalid component");
vConstMask = nullptr;
break;
}
conversionFactor = VIMMED1((float)(1.0));
break;
case CONVERT_USCALED:
- SWR_INVALID("Type should not be sign extended!");
+ assert(false && "Type should not be sign extended!");
conversionFactor = nullptr;
break;
default:
- SWR_ASSERT(conversionType == CONVERT_NONE);
+ assert(conversionType == CONVERT_NONE);
conversionFactor = nullptr;
break;
}
TargetWidth* pWidth,
Type** pTy)
{
+ assert(pCallInst);
Type* pVecTy = pCallInst->getType();
// Check for intrinsic specific types
// VCVTPD2PS type comes from src, not dst
if (intrinName.equals("meta.intrinsic.VCVTPD2PS"))
{
- pVecTy = pCallInst->getOperand(0)->getType();
+ Value* pOp = pCallInst->getOperand(0);
+ assert(pOp);
+ pVecTy = pOp->getType();
}
if (!pVecTy->isVectorTy())
Instruction* ProcessIntrinsicAdvanced(CallInst* pCallInst)
{
- Function* pFunc = pCallInst->getCalledFunction();
+ Function* pFunc = pCallInst->getCalledFunction();
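+ // getCalledFunction() is null for indirect calls; only direct intrinsic calls are expected here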
+ assert(pFunc);
+
auto& intrinsic = intrinsicMap2[mTarget][pFunc->getName()];
TargetWidth vecWidth;
Type* pElemTy;
Instruction* ProcessIntrinsic(CallInst* pCallInst)
{
Function* pFunc = pCallInst->getCalledFunction();
+ assert(pFunc);
// Forward to the advanced support if found
if (intrinsicMap2[mTarget].find(pFunc->getName()) != intrinsicMap2[mTarget].end())
auto B = pThis->B;
auto vf32Src = pCallInst->getOperand(0);
+ assert(vf32Src);
auto i8Round = pCallInst->getOperand(1);
+ assert(i8Round);
auto pfnFunc =
Intrinsic::getDeclaration(B->JM()->mpCurrentModule, Intrinsic::x86_avx_round_ps_256);
assert(index < MAX_SO_STREAMS);
pq = (struct swr_query *) AlignedMalloc(sizeof(struct swr_query), 64);
- memset(pq, 0, sizeof(*pq));
if (pq) {
+ memset(pq, 0, sizeof(*pq));
pq->type = type;
pq->index = index;
}
struct lp_build_sampler_soa *sampler =
swr_sampler_soa_create(key.sampler, PIPE_SHADER_GEOMETRY);
+ assert(sampler != nullptr);
struct lp_bld_tgsi_system_values system_values;
memset(&system_values, 0, sizeof(system_values));
ubyte semantic_idx = info->input_semantic_index[slot];
unsigned vs_slot = locate_linkage(semantic_name, semantic_idx, &ctx->vs->info.base);
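+ // locate_linkage() returns 0xFFFFFFFF when the output is missing; the range assert catches that case too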
+ assert(vs_slot < PIPE_MAX_SHADER_OUTPUTS);
vs_slot += VERTEX_ATTRIB_START_SLOT;
struct lp_build_sampler_soa *sampler =
swr_sampler_soa_create(key.sampler, PIPE_SHADER_TESS_EVAL);
+ assert(sampler != nullptr);
struct lp_bld_tgsi_system_values system_values;
memset(&system_values, 0, sizeof(system_values));
// Where in TCS output is my attribute?
// TESS_TODO: revisit after implementing pass-through TCS
unsigned tcs_slot = locate_linkage(semantic_name, semantic_idx, pPrevShader);
+ assert(tcs_slot < PIPE_MAX_SHADER_OUTPUTS);
// Skip tessellation levels - these go to the tessellator, not TES
switch (semantic_name) {
struct lp_build_sampler_soa *sampler =
swr_sampler_soa_create(key.sampler, PIPE_SHADER_TESS_CTRL);
+ assert(sampler != nullptr);
struct lp_bld_tgsi_system_values system_values;
memset(&system_values, 0, sizeof(system_values));
unsigned vs_slot =
locate_linkage(semantic_name, semantic_idx, &ctx->vs->info.base);
+ assert(vs_slot < PIPE_MAX_SHADER_OUTPUTS);
vs_slot += VERTEX_ATTRIB_START_SLOT;
struct lp_build_sampler_soa *sampler =
swr_sampler_soa_create(key.sampler, PIPE_SHADER_VERTEX);
+ assert(sampler != nullptr);
struct lp_bld_tgsi_system_values system_values;
memset(&system_values, 0, sizeof(system_values));
}
}
}
+ assert(cv < PIPE_MAX_SHADER_OUTPUTS);
LLVMValueRef cx = LLVMBuildLoad(gallivm->builder, outputs[cv][0], "");
LLVMValueRef cy = LLVMBuildLoad(gallivm->builder, outputs[cv][1], "");
LLVMValueRef cz = LLVMBuildLoad(gallivm->builder, outputs[cv][2], "");
if ((pLastFE->clipdist_writemask & clip_mask & (1 << val)) ||
((pLastFE->culldist_writemask << pLastFE->num_written_clipdistance) & (1 << val))) {
unsigned cv = locate_linkage(TGSI_SEMANTIC_CLIPDIST, val < 4 ? 0 : 1, pLastFE);
+ assert(cv < PIPE_MAX_SHADER_OUTPUTS);
if (val < 4) {
LLVMValueRef dist = LLVMBuildLoad(gallivm->builder, outputs[cv][val], "");
WriteVS(unwrap(dist), pVsCtx, vtxOutput, VERTEX_CLIPCULL_DIST_LO_SLOT, val);
linkedAttrib = pPrevShader->num_outputs + extraAttribs - 1;
swr_fs->pointSpriteMask |= (1 << linkedAttrib);
extraAttribs++;
- } else if (linkedAttrib == 0xFFFFFFFF) {
+ } else if (linkedAttrib + 1 == 0xFFFFFFFF) {
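+ /* attribute not found: locate_linkage() gave 0xFFFFFFFF and linkedAttrib holds that value minus one */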
inputs[attrib][0] = wrap(VIMMED1(0.0f));
inputs[attrib][1] = wrap(VIMMED1(0.0f));
inputs[attrib][2] = wrap(VIMMED1(0.0f));
Value *offset = NULL;
if (semantic_name == TGSI_SEMANTIC_COLOR && key.light_twoside) {
bcolorAttrib = locate_linkage(
- TGSI_SEMANTIC_BCOLOR, semantic_idx, pPrevShader) - 1;
+ TGSI_SEMANTIC_BCOLOR, semantic_idx, pPrevShader);
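+ /* keep the raw locate_linkage() result here; the -1 slot adjustment is applied only after the 0xFFFFFFFF (not found) checks below */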
/* Neither front nor back colors were available. Nothing to load. */
if (bcolorAttrib == 0xFFFFFFFF && linkedAttrib == 0xFFFFFFFF)
continue;
/* If there is no front color, just always use the back color. */
- if (linkedAttrib == 0xFFFFFFFF)
+ if (linkedAttrib + 1 == 0xFFFFFFFF)
linkedAttrib = bcolorAttrib;
if (bcolorAttrib != 0xFFFFFFFF) {
+ bcolorAttrib -= 1;
if (interpMode == TGSI_INTERPOLATE_CONSTANT) {
swr_fs->constantMask |= 1 << bcolorAttrib;
} else if (interpMode == TGSI_INTERPOLATE_COLOR) {
}
sampler = swr_sampler_soa_create(key.sampler, PIPE_SHADER_FRAGMENT);
+ assert(sampler != nullptr);
struct lp_bld_tgsi_system_values system_values;
memset(&system_values, 0, sizeof(system_values));
const struct pipe_blend_state *blend)
{
struct swr_blend_state *state = CALLOC_STRUCT(swr_blend_state);
+ assert(state != nullptr);
memcpy(&state->pipe, blend, sizeof(*blend));