From b39c530f88454addd507ea1ddc167597d3f333ae Mon Sep 17 00:00:00 2001 From: Tim Rowley Date: Mon, 2 May 2016 11:37:37 -0600 Subject: [PATCH] swr: [rasterizer] Add SWR_ASSUME / SWR_ASSUME_ASSERT macros Fix static code analysis errors found by coverity on Linux Reviewed-by: Bruce Cherniak --- .../swr/rasterizer/common/swr_assert.h | 50 +++++++++++++++++-- .../drivers/swr/rasterizer/core/arena.h | 16 +++--- 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/src/gallium/drivers/swr/rasterizer/common/swr_assert.h b/src/gallium/drivers/swr/rasterizer/common/swr_assert.h index a805acc777c..04d768191b4 100644 --- a/src/gallium/drivers/swr/rasterizer/common/swr_assert.h +++ b/src/gallium/drivers/swr/rasterizer/common/swr_assert.h @@ -28,6 +28,41 @@ #error swr_assert.h should not be included directly, please include "common/os.h" instead. #endif +//============================================================================= +// +// MACROS defined in this file: +// +// - SWR_ASSUME(expression, ...): Tell compiler that the expression is true. +// Helps with static code analysis as well. +// DO NOT USE if code after this dynamically +// checks for errors and handles them. The +// compiler may optimize out the error check. +// +// - SWR_ASSERT(expression, ...): Inform the user if expression is false. +// This check is only conditionally made, +// usually only in debug mode. +// +// - SWR_REL_ASSERT(expression, ...): Unconditionally enabled version of SWR_ASSERT +// +// - SWR_ASSUME_ASSERT(expression, ...): Conditionally enabled SWR_ASSERT. Uses +// SWR_ASSUME if SWR_ASSERT is disabled. +// DO NOT USE in combination with actual +// error checking (see SWR_ASSUME) +// +// - SWR_REL_ASSUME_ASSERT(expression, ...): Same as SWR_REL_ASSERT. +// +//============================================================================= + +#if defined(_WIN32) +#define SWR_ASSUME(e, ...) __assume(e) +#elif defined(__clang__) +#define SWR_ASSUME(e, ...) 
__builtin_assume(e) +#elif defined(__GNUC__) +#define SWR_ASSUME(e, ...) ((e) ? ((void)0) : __builtin_unreachable()) +#else +#define SWR_ASSUME(e, ...) ASSUME(e) +#endif + #if !defined(SWR_ENABLE_ASSERTS) #if !defined(NDEBUG) @@ -79,28 +114,33 @@ bool SwrAssert( } #if SWR_ENABLE_ASSERTS -#define SWR_ASSERT(e, ...) _SWR_ASSERT(true, e, ##__VA_ARGS__) +#define SWR_ASSERT(e, ...) _SWR_ASSERT(true, e, ##__VA_ARGS__) +#define SWR_ASSUME_ASSERT(e, ...) SWR_ASSERT(e, ##__VA_ARGS__) #if defined(assert) #undef assert #endif #define assert(exp) SWR_ASSERT(exp) -#endif +#endif // SWR_ENABLE_ASSERTS #if SWR_ENABLE_REL_ASSERTS -#define SWR_REL_ASSERT(e, ...) _SWR_ASSERT(false, e, ##__VA_ARGS__) +#define SWR_REL_ASSERT(e, ...) _SWR_ASSERT(false, e, ##__VA_ARGS__) +#define SWR_REL_ASSUME_ASSERT(e, ...) SWR_REL_ASSERT(e, ##__VA_ARGS__) #endif + #endif // C++ #endif // SWR_ENABLE_ASSERTS || SWR_ENABLE_REL_ASSERTS #if !SWR_ENABLE_ASSERTS -#define SWR_ASSERT(e, ...) (void)(0) +#define SWR_ASSERT(e, ...) (void)(0) +#define SWR_ASSUME_ASSERT(e, ...) SWR_ASSUME(e, ##__VA_ARGS__) #endif #if !SWR_ENABLE_REL_ASSERTS -#define SWR_REL_ASSERT(e, ...) (void)(0) +#define SWR_REL_ASSERT(e, ...) (void)(0) +#define SWR_REL_ASSUME_ASSERT(e, ...) 
SWR_ASSUME(e, ##__VA_ARGS__) #endif #define SWR_NOT_IMPL SWR_ASSERT(0, "%s not implemented", __FUNCTION__) diff --git a/src/gallium/drivers/swr/rasterizer/core/arena.h b/src/gallium/drivers/swr/rasterizer/core/arena.h index b80f6f0d257..26785db183f 100644 --- a/src/gallium/drivers/swr/rasterizer/core/arena.h +++ b/src/gallium/drivers/swr/rasterizer/core/arena.h @@ -52,7 +52,7 @@ class DefaultAllocator public: ArenaBlock* AllocateAligned(size_t size, size_t align) { - SWR_ASSERT(size >= sizeof(ArenaBlock)); + SWR_ASSUME_ASSERT(size >= sizeof(ArenaBlock)); ArenaBlock* p = new (_aligned_malloc(size, align)) ArenaBlock(); p->blockSize = size; @@ -63,7 +63,7 @@ public: { if (pMem) { - SWR_ASSERT(pMem->blockSize < size_t(0xdddddddd)); + SWR_ASSUME_ASSERT(pMem->blockSize < size_t(0xdddddddd)); _aligned_free(pMem); } } @@ -75,8 +75,8 @@ struct CachingAllocatorT : DefaultAllocator { ArenaBlock* AllocateAligned(size_t size, size_t align) { - SWR_ASSERT(size >= sizeof(ArenaBlock)); - SWR_ASSERT(size <= uint32_t(-1)); + SWR_ASSUME_ASSERT(size >= sizeof(ArenaBlock)); + SWR_ASSUME_ASSERT(size <= uint32_t(-1)); uint32_t bucket = GetBucketId(size); @@ -111,7 +111,7 @@ struct CachingAllocatorT : DefaultAllocator if (pBlock) { - SWR_ASSERT(pPrevBlock && pPrevBlock->pNext == pBlock); + SWR_ASSUME_ASSERT(pPrevBlock && pPrevBlock->pNext == pBlock); pPrevBlock->pNext = pBlock->pNext; pBlock->pNext = nullptr; @@ -143,8 +143,6 @@ struct CachingAllocatorT : DefaultAllocator { if (pMem) { - SWR_ASSERT(pMem->blockSize >= 0); - std::unique_lock l(m_mutex); InsertCachedBlock(GetBucketId(pMem->blockSize), pMem); } @@ -260,7 +258,7 @@ private: template void InsertCachedBlock(uint32_t bucketId, ArenaBlock* pNewBlock) { - SWR_ASSERT(bucketId < CACHE_NUM_BUCKETS); + SWR_ASSUME_ASSERT(bucketId < CACHE_NUM_BUCKETS); ArenaBlock* pPrevBlock = OldBlockT ? 
&m_oldCachedBlocks[bucketId] : &m_cachedBlocks[bucketId]; ArenaBlock* pBlock = pPrevBlock->pNext; @@ -277,7 +275,7 @@ private: } // Insert into list - SWR_ASSERT(pPrevBlock); + SWR_ASSUME_ASSERT(pPrevBlock); pPrevBlock->pNext = pNewBlock; pNewBlock->pNext = pBlock; -- 2.30.2