}
}
+// simplified from std.traits.maxAlignment
+template maxAlignment(U...)
+{
+ static if (U.length == 0)
+ static assert(0);
+ else static if (U.length == 1)
+ enum maxAlignment = U[0].alignof;
+ else static if (U.length == 2)
+ enum maxAlignment = U[0].alignof > U[1].alignof ? U[0].alignof : U[1].alignof;
+ else
+ {
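+ // Recurse on the two halves of the type list; splitting in half keeps the
+ // template instantiation depth logarithmic in the number of types.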
+ enum a = maxAlignment!(U[0 .. ($+1)/2]);
+ enum b = maxAlignment!(U[($+1)/2 .. $]);
+ enum maxAlignment = a > b ? a : b;
+ }
+}
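+// For example, maxAlignment!(byte, int, real) evaluates at compile time to the
+// largest .alignof among the listed types (real.alignof on most 64-bit targets).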
+
+template classInstanceAlignment(T)
+if (is(T == class))
+{
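+ // void* stands in for the hidden vtable and monitor pointers of a class
+ // instance; typeof(T.tupleof) yields the types of T's own declared fields.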
+ alias classInstanceAlignment = maxAlignment!(void*, typeof(T.tupleof));
+}
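+
+// For example, classInstanceAlignment!C for a hypothetical `class C { double d; }`
+// is the larger of (void*).alignof and double.alignof.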
+
// Somehow fails for non-static nested structs without support for aliases
template hasElaborateDestructor(T...)
{
{
import core.atomic, core.memory, core.sync.mutex;
+ // Handling unaligned mutexes is not supported on all platforms, so we must
+ // ensure that the addresses of all shared data are appropriately aligned.
+ import core.internal.traits : classInstanceAlignment;
+
+ enum mutexAlign = classInstanceAlignment!Mutex;
+ enum mutexClassInstanceSize = __traits(classInstanceSize, Mutex);
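+ // These two values describe the static storage needed to hold a Mutex
+ // instance constructed in place by initLocks() below.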
+
//
// exposed by compiler runtime
//
// lock order inversion.
@property static Mutex slock() nothrow @nogc
{
- return cast(Mutex)_locks[0].ptr;
+ return cast(Mutex)_slock.ptr;
}
@property static Mutex criticalRegionLock() nothrow @nogc
{
- return cast(Mutex)_locks[1].ptr;
+ return cast(Mutex)_criticalRegionLock.ptr;
}
- __gshared align(Mutex.alignof) void[__traits(classInstanceSize, Mutex)][2] _locks;
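+ // Raw, suitably aligned storage for the two mutexes, which are constructed
+ // in place by initLocks() and torn down by termLocks().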
+ __gshared align(mutexAlign) void[mutexClassInstanceSize] _slock;
+ __gshared align(mutexAlign) void[mutexClassInstanceSize] _criticalRegionLock;
static void initLocks()
{
- foreach (ref lock; _locks)
- {
- lock[] = typeid(Mutex).initializer[];
- (cast(Mutex)lock.ptr).__ctor();
- }
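+ // Construct each mutex in place: copy the class initializer into the
+ // buffer, then invoke the constructor on it.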
+ _slock[] = typeid(Mutex).initializer[];
+ (cast(Mutex)_slock.ptr).__ctor();
+
+ _criticalRegionLock[] = typeid(Mutex).initializer[];
+ (cast(Mutex)_criticalRegionLock.ptr).__ctor();
}
static void termLocks()
{
- foreach (ref lock; _locks)
- (cast(Mutex)lock.ptr).__dtor();
+ (cast(Mutex)_slock.ptr).__dtor();
+ (cast(Mutex)_criticalRegionLock.ptr).__dtor();
}
__gshared Context* sm_cbeg;