/****************************************************************************
* Copyright (C) 2014-2016 Intel Corporation.   All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
****************************************************************************/
#include <stdio.h>
#include <thread>
#include <algorithm>
#include <float.h>
#include <vector>
#include <utility>
#include <fstream>
#include <string>

#if defined(__linux__) || defined(__gnu_linux__)
#include <pthread.h>
#include <sched.h>
#include <unistd.h>
#endif

#include "common/os.h"
#include "context.h"
#include "frontend.h"
#include "backend.h"
#include "rasterizer.h"
#include "rdtsc_core.h"
#include "tilemgr.h"
// One physical core and the logical (HW) threads that live on it
struct Core
{
    uint32_t              procGroup = 0;
    std::vector<uint32_t> threadIds;
};

struct NumaNode
{
    std::vector<Core> cores;
};

typedef std::vector<NumaNode> CPUNumaNodes;
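
// Example of the shape CalculateProcessorTopology() fills in below (values
// illustrative only; actual ids depend on the OS enumeration order): a
// 1-node, 2-core, 2-hyperthread CPU might produce
//
//     nodes[0].cores[0].threadIds == { 0, 2 }
//     nodes[0].cores[1].threadIds == { 1, 3 }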

void CalculateProcessorTopology(CPUNumaNodes& out_nodes, uint32_t& out_numThreadsPerProcGroup)
{
    out_numThreadsPerProcGroup = 0;

#if defined(_WIN32)

    std::vector<KAFFINITY> threadMaskPerProcGroup;

    static std::mutex m;
    std::lock_guard<std::mutex> l(m);

    DWORD bufSize = 0;

    BOOL ret = GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &bufSize);
    SWR_ASSERT(ret == FALSE && GetLastError() == ERROR_INSUFFICIENT_BUFFER);

    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBufferMem = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)malloc(bufSize);
    SWR_ASSERT(pBufferMem);

    ret = GetLogicalProcessorInformationEx(RelationProcessorCore, pBufferMem, &bufSize);
    SWR_ASSERT(ret != FALSE, "Failed to get Processor Topology Information");

    uint32_t count = bufSize / pBufferMem->Size;
    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBuffer = pBufferMem;
    for (uint32_t i = 0; i < count; ++i)
    {
        SWR_ASSERT(pBuffer->Relationship == RelationProcessorCore);
        for (uint32_t g = 0; g < pBuffer->Processor.GroupCount; ++g)
        {
            auto& gmask = pBuffer->Processor.GroupMask[g];
            uint32_t threadId = 0;
            uint32_t procGroup = gmask.Group;

            Core* pCore = nullptr;

            uint32_t numThreads = (uint32_t)_mm_popcount_sizeT(gmask.Mask);

            while (BitScanForwardSizeT((unsigned long*)&threadId, gmask.Mask))
            {
                // Clear this thread's bit from the group mask
                KAFFINITY threadMask = KAFFINITY(1) << threadId;
                gmask.Mask &= ~threadMask;

                if (procGroup >= threadMaskPerProcGroup.size())
                {
                    threadMaskPerProcGroup.resize(procGroup + 1);
                }

                if (threadMaskPerProcGroup[procGroup] & threadMask)
                {
                    // Already seen this mask. This means that we are in 32-bit mode and
                    // have seen more than 32 HW threads for this procGroup
#if defined(_WIN64)
                    SWR_ASSERT(false, "Shouldn't get here in 64-bit mode");
#endif
                    continue;
                }

                threadMaskPerProcGroup[procGroup] |= (KAFFINITY(1) << threadId);

                // Find the NUMA node this thread belongs to
                uint32_t numaId = 0;
                PROCESSOR_NUMBER procNum = {};
                procNum.Group = WORD(procGroup);
                procNum.Number = UCHAR(threadId);

                ret = GetNumaProcessorNodeEx(&procNum, (PUSHORT)&numaId);
                SWR_ASSERT(ret);

                // Store this thread in the topology
                if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
                auto& numaNode = out_nodes[numaId];

                if (nullptr == pCore)
                {
                    numaNode.cores.push_back(Core());
                    pCore = &numaNode.cores.back();
                    pCore->procGroup = procGroup;
                }
                pCore->threadIds.push_back(threadId);

                out_numThreadsPerProcGroup++;
            }
        }

        pBuffer = PtrAdd(pBuffer, pBuffer->Size);
    }

    free(pBufferMem);

#elif defined(__linux__) || defined (__gnu_linux__)

    // Parse /proc/cpuinfo to get full topology
    std::ifstream input("/proc/cpuinfo");
    std::string line;
    char* c;
    uint32_t threadId = uint32_t(-1);
    uint32_t coreId = uint32_t(-1);
    uint32_t numaId = uint32_t(-1);

    while (std::getline(input, line))
    {
        if (line.find("processor") != std::string::npos)
        {
            // A new "processor" stanza: save the information from the previous one
            if (threadId != uint32_t(-1))
            {
                if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
                auto& numaNode = out_nodes[numaId];
                if (numaNode.cores.size() <= coreId) numaNode.cores.resize(coreId + 1);
                auto& core = numaNode.cores[coreId];

                core.procGroup = coreId;
                core.threadIds.push_back(threadId);

                out_numThreadsPerProcGroup++;
            }

            auto data_start = line.find(": ") + 2;
            threadId = std::strtoul(&line.c_str()[data_start], &c, 10);
            continue;
        }
        if (line.find("core id") != std::string::npos)
        {
            auto data_start = line.find(": ") + 2;
            coreId = std::strtoul(&line.c_str()[data_start], &c, 10);
            continue;
        }
        if (line.find("physical id") != std::string::npos)
        {
            auto data_start = line.find(": ") + 2;
            numaId = std::strtoul(&line.c_str()[data_start], &c, 10);
            continue;
        }
    }

    // Save the last processor stanza
    if (threadId != uint32_t(-1))
    {
        if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
        auto& numaNode = out_nodes[numaId];
        if (numaNode.cores.size() <= coreId) numaNode.cores.resize(coreId + 1);
        auto& core = numaNode.cores[coreId];

        core.procGroup = coreId;
        core.threadIds.push_back(threadId);
        out_numThreadsPerProcGroup++;
    }

    // Prune empty cores left behind by sparse core ids
    for (uint32_t node = 0; node < out_nodes.size(); node++) {
        auto& numaNode = out_nodes[node];
        auto it = numaNode.cores.begin();
        for ( ; it != numaNode.cores.end(); ) {
            if (it->threadIds.size() == 0)
            {
                // erase() invalidates the iterator; use the one it returns
                it = numaNode.cores.erase(it);
            }
            else
            {
                ++it;
            }
        }
    }

#else

#error Unsupported platform

#endif
}
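
// For reference, the /proc/cpuinfo fields the Linux parser above keys on look
// like this (one stanza per logical CPU; values illustrative):
//
//     processor       : 0        <- threadId
//     physical id     : 0        <- numaId (socket)
//     core id         : 0        <- coreId
//
// Each new "processor" stanza flushes the previous one into out_nodes, which
// is why the final stanza must be saved once more after the getline loop.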

void bindThread(SWR_CONTEXT* pContext, uint32_t threadId, uint32_t procGroupId = 0, bool bindProcGroup = false)
{
    // Only bind threads when MAX_WORKER_THREADS isn't set.
    if (pContext->threadInfo.MAX_WORKER_THREADS && bindProcGroup == false)
    {
        return;
    }

#if defined(_WIN32)

    GROUP_AFFINITY affinity = {};
    affinity.Group = procGroupId;

#if !defined(_WIN64)
    if (threadId >= 32)
    {
        // Hopefully we don't get here. Logic in CreateThreadPool should prevent this.
        SWR_REL_ASSERT(false, "Shouldn't get here");

        // In a 32-bit process on Windows it is impossible to bind
        // to logical processors 32-63 within a processor group.
        // In this case set the mask to 0 and let the system assign
        // the processor. Hopefully it will make smart choices.
        affinity.Mask = 0;
    }
    else
#endif
    {
        // If MAX_WORKER_THREADS is set, only bind to the proc group,
        // not the individual HW thread.
        if (!pContext->threadInfo.MAX_WORKER_THREADS)
        {
            affinity.Mask = KAFFINITY(1) << threadId;
        }
    }

    SetThreadGroupAffinity(GetCurrentThread(), &affinity, nullptr);

#else

    cpu_set_t cpuset;
    pthread_t thread = pthread_self();
    CPU_ZERO(&cpuset);
    CPU_SET(threadId, &cpuset);

    pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);

#endif
}
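
// Usage sketch: bindThread(pContext, 2) pins the calling thread to logical
// CPU 2 (on Linux via pthread_setaffinity_np; on Windows via group affinity,
// where procGroupId selects the processor group on machines with more than
// 64 logical processors).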

INLINE
uint32_t GetEnqueuedDraw(SWR_CONTEXT *pContext)
{
    return pContext->dcRing.GetHead();
}

INLINE
DRAW_CONTEXT *GetDC(SWR_CONTEXT *pContext, uint32_t drawId)
{
    return &pContext->dcRing[(drawId-1) % KNOB_MAX_DRAWS_IN_FLIGHT];
}
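
// The dcRing is indexed modulo KNOB_MAX_DRAWS_IN_FLIGHT, so draw ids map to
// slots cyclically. A sketch with a hypothetical ring size of 8:
//
//     drawId 1 -> slot 0,  drawId 8 -> slot 7,  drawId 9 -> slot 0 again
//
// which is why a slot can only be reused once the draw that sat there
// KNOB_MAX_DRAWS_IN_FLIGHT positions earlier has fully retired.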

INLINE
bool IDComparesLess(uint32_t a, uint32_t b)
{
    // Use signed delta to ensure that wrap-around to 0 is correctly handled.
    int32_t delta = int32_t(a - b);
    return (delta < 0);
}
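
// A worked example of the wrap-around math above (illustrative):
//
//     IDComparesLess(5, 7)            -> int32_t(5 - 7)      == -2 -> true
//     IDComparesLess(7, 5)            -> int32_t(7 - 5)      ==  2 -> false
//     IDComparesLess(0xFFFFFFFEu, 1)  -> int32_t(0xFFFFFFFD) == -3 -> true
//
// The last case is the interesting one: draw 0xFFFFFFFE was issued before the
// counter wrapped to draw 1, and the signed delta still orders the two ids
// correctly, provided they are less than 2^31 apart.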

// returns true if dependency not met
INLINE
bool CheckDependency(SWR_CONTEXT *pContext, DRAW_CONTEXT *pDC, uint32_t lastRetiredDraw)
{
    return pDC->dependent && IDComparesLess(lastRetiredDraw, pDC->drawId - 1);
}
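
// Example: for a dependent draw with drawId == 10, CheckDependency() returns
// true (dependency NOT met) until lastRetiredDraw reaches 9, i.e. until every
// prior draw has retired.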

//////////////////////////////////////////////////////////////////////////
/// @brief Update client stats.
INLINE
void UpdateClientStats(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
{
    if ((pContext->pfnUpdateStats == nullptr) || (GetApiState(pDC).enableStats == false))
    {
        return;
    }

    DRAW_DYNAMIC_STATE& dynState = pDC->dynState;
    SWR_STATS stats{ 0 };

    // Sum up stats across all workers before sending to client.
    for (uint32_t i = 0; i < pContext->NumWorkerThreads; ++i)
    {
        stats.DepthPassCount += dynState.pStats[i].DepthPassCount;
        stats.PsInvocations  += dynState.pStats[i].PsInvocations;
        stats.CsInvocations  += dynState.pStats[i].CsInvocations;
    }

    pContext->pfnUpdateStats(GetPrivateState(pDC), &stats);
}

INLINE
void ExecuteCallbacks(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
{
    UpdateClientStats(pContext, pDC);

    if (pDC->retireCallback.pfnCallbackFunc)
    {
        pDC->retireCallback.pfnCallbackFunc(pDC->retireCallback.userData,
                                            pDC->retireCallback.userData2,
                                            pDC->retireCallback.userData3);
    }
}

// inlined-only version
INLINE
int32_t CompleteDrawContextInl(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
{
    int32_t result = InterlockedDecrement((volatile LONG*)&pDC->threadsDone);
    SWR_ASSERT(result >= 0);

    if (result == 0)
    {
        // Last thread out: run retire callbacks and recycle this DC
        ExecuteCallbacks(pContext, pDC);

        // Cleanup memory allocations
        pDC->pArena->Reset(true);

        pDC->pTileMgr->initialize();

        if (pDC->cleanupState)
        {
            pDC->pState->pArena->Reset(true);
        }

        pContext->dcRing.Dequeue();  // Remove from tail
    }

    return result;
}

// available to other translation modules
int32_t CompleteDrawContext(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
{
    return CompleteDrawContextInl(pContext, pDC);
}

INLINE
bool FindFirstIncompleteDraw(SWR_CONTEXT* pContext, uint32_t& curDrawBE, uint32_t& drawEnqueued)
{
    // increment our current draw id to the first incomplete draw
    drawEnqueued = GetEnqueuedDraw(pContext);
    while (IDComparesLess(curDrawBE, drawEnqueued))
    {
        DRAW_CONTEXT *pDC = &pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT];

        // If it's not compute and FE is not done then break out of loop.
        if (!pDC->doneFE && !pDC->isCompute) break;

        bool isWorkComplete = pDC->isCompute ?
            pDC->pDispatch->isWorkComplete() :
            pDC->pTileMgr->isWorkComplete();

        if (isWorkComplete)
        {
            curDrawBE++;
            CompleteDrawContextInl(pContext, pDC);
        }
        else
        {
            break;
        }
    }

    // If there are no more incomplete draws then return false.
    return IDComparesLess(curDrawBE, drawEnqueued);
}

//////////////////////////////////////////////////////////////////////////
/// @brief If there is any BE work then go work on it.
/// @param pContext - pointer to SWR context.
/// @param workerId - The unique worker ID that is assigned to this thread.
/// @param curDrawBE - This tracks the draw contexts that this thread has processed. Each worker thread
///                    has its own curDrawBE counter and this ensures that each worker processes all the
///                    draws in order.
/// @param lockedTiles - This is the set of tiles locked by other threads. Each thread maintains its
///                      own set and each time it fails to lock a macrotile, because it's already locked,
///                      it will add that tile to the lockedTiles set. As a worker begins to work
///                      on future draws, the lockedTiles set ensures that it doesn't work on tiles that
///                      may still have work pending in a previous draw. Additionally, the lockedTiles
///                      set is a heuristic that can steer a worker back to the same macrotile that it
///                      had been working on in a previous draw.
/// @returns true if worker thread should shutdown
bool WorkOnFifoBE(
    SWR_CONTEXT *pContext,
    uint32_t workerId,
    uint32_t &curDrawBE,
    TileSet &lockedTiles,
    uint32_t numaNode,
    uint32_t numaMask)
{
    bool bShutdown = false;

    // Find the first incomplete draw that has pending work. If no such draw is found then
    // return. FindFirstIncompleteDraw is responsible for incrementing the curDrawBE.
    uint32_t drawEnqueued = 0;
    if (FindFirstIncompleteDraw(pContext, curDrawBE, drawEnqueued) == false)
    {
        return false;
    }

    uint32_t lastRetiredDraw = pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT].drawId - 1;

    // Reset our history for locked tiles. We'll have to re-learn which tiles are locked.
    lockedTiles.clear();

    // Try to work on each draw in order of the available draws in flight.
    //   1. If we're on curDrawBE, we can work on any macrotile that is available.
    //   2. If we're trying to work on draws after curDrawBE, we are restricted to
    //      working on those macrotiles that are known to be complete in the prior draw to
    //      maintain order. The locked tiles provide the history to ensure this.
    for (uint32_t i = curDrawBE; IDComparesLess(i, drawEnqueued); ++i)
    {
        DRAW_CONTEXT *pDC = &pContext->dcRing[i % KNOB_MAX_DRAWS_IN_FLIGHT];

        if (pDC->isCompute) return false; // We don't look at compute work.

        // First wait for FE to be finished with this draw. This keeps threading model simple
        // but if there are lots of bubbles between draws then serializing FE and BE may
        // need to be revisited.
        if (!pDC->doneFE) return false;

        // If this draw is dependent on a previous draw then we need to bail.
        if (CheckDependency(pContext, pDC, lastRetiredDraw))
        {
            return false;
        }

        // Grab the list of all dirty macrotiles. A tile is dirty if it has work queued to it.
        auto &macroTiles = pDC->pTileMgr->getDirtyTiles();

        for (auto tile : macroTiles)
        {
            uint32_t tileID = tile->mId;

            // Only work on tiles for this numa node
            uint32_t x, y;
            pDC->pTileMgr->getTileIndices(tileID, x, y);
            if (((x ^ y) & numaMask) != numaNode)
            {
                continue;
            }

            if (!tile->getNumQueued())
            {
                continue;
            }

            // can only work on this draw if it's not in use by other threads
            if (lockedTiles.find(tileID) != lockedTiles.end())
            {
                continue;
            }

            if (tile->tryLock())
            {
                BE_WORK *pWork;

                AR_BEGIN(WorkerFoundWork, pDC->drawId);

                uint32_t numWorkItems = tile->getNumQueued();
                SWR_ASSERT(numWorkItems);

                pWork = tile->peek();
                SWR_ASSERT(pWork);
                if (pWork->type == DRAW)
                {
                    pContext->pHotTileMgr->InitializeHotTiles(pContext, pDC, workerId, tileID);
                }
                else if (pWork->type == SHUTDOWN)
                {
                    bShutdown = true;
                }

                while ((pWork = tile->peek()) != nullptr)
                {
                    pWork->pfnWork(pDC, workerId, tileID, &pWork->desc);

                    // now remove the work from the queue
                    tile->dequeue();
                }
                AR_END(WorkerFoundWork, numWorkItems);

                _ReadWriteBarrier();

                pDC->pTileMgr->markTileComplete(tileID);

                // Optimization: If the draw is complete and we're the last one to have worked on it then
                // we can reset the locked list as we know that all previous draws before the next are guaranteed to be complete.
                if ((curDrawBE == i) && (bShutdown || pDC->pTileMgr->isWorkComplete()))
                {
                    // We can increment the current BE and safely move to next draw since we know this draw is complete.
                    curDrawBE++;
                    CompleteDrawContextInl(pContext, pDC);

                    lastRetiredDraw++;

                    lockedTiles.clear();
                    break;
                }

                if (bShutdown)
                {
                    break;
                }
            }
            else
            {
                // This tile is already locked. So let's add it to our locked tiles set. This way we don't try locking this one again.
                lockedTiles.insert(tileID);
            }
        }
    }

    return bShutdown;
}
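
// A note on the NUMA filter above, ((x ^ y) & numaMask) != numaNode: with
// numaMask == numNodes - 1 (CreateThreadPool requires a power-of-two node
// count for this), macrotiles are distributed across nodes in a checkerboard
// pattern. Sketch for 2 nodes (numaMask == 1):
//
//     tile (0,0): (0 ^ 0) & 1 == 0 -> node 0    tile (1,0): (1 ^ 0) & 1 == 1 -> node 1
//     tile (0,1): (0 ^ 1) & 1 == 1 -> node 1    tile (1,1): (1 ^ 1) & 1 == 0 -> node 0
//
// so neighboring tiles land on different nodes and BE work spreads evenly.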

//////////////////////////////////////////////////////////////////////////
/// @brief Called when FE work is complete for this DC.
INLINE
void CompleteDrawFE(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
{
    if (pContext->pfnUpdateStatsFE && GetApiState(pDC).enableStats)
    {
        pContext->pfnUpdateStatsFE(GetPrivateState(pDC), &pDC->dynState.statsFE);
    }

    if (pContext->pfnUpdateSoWriteOffset)
    {
        for (uint32_t i = 0; i < MAX_SO_BUFFERS; ++i)
        {
            if ((pDC->dynState.SoWriteOffsetDirty[i]) &&
                (pDC->pState->state.soBuffer[i].soWriteEnable))
            {
                pContext->pfnUpdateSoWriteOffset(GetPrivateState(pDC), i, pDC->dynState.SoWriteOffset[i]);
            }
        }
    }

    // Ensure all streaming writes are globally visible before marking this FE done
    _mm_mfence();
    pDC->doneFE = true;

    InterlockedDecrement((volatile LONG*)&pContext->drawsOutstandingFE);
}

void WorkOnFifoFE(SWR_CONTEXT *pContext, uint32_t workerId, uint32_t &curDrawFE)
{
    // Try to grab the next DC from the ring
    uint32_t drawEnqueued = GetEnqueuedDraw(pContext);
    while (IDComparesLess(curDrawFE, drawEnqueued))
    {
        uint32_t dcSlot = curDrawFE % KNOB_MAX_DRAWS_IN_FLIGHT;
        DRAW_CONTEXT *pDC = &pContext->dcRing[dcSlot];
        if (pDC->isCompute || pDC->doneFE || pDC->FeLock)
        {
            CompleteDrawContextInl(pContext, pDC);
            curDrawFE++;
        }
        else
        {
            break;
        }
    }

    uint32_t curDraw = curDrawFE;
    while (IDComparesLess(curDraw, drawEnqueued))
    {
        uint32_t dcSlot = curDraw % KNOB_MAX_DRAWS_IN_FLIGHT;
        DRAW_CONTEXT *pDC = &pContext->dcRing[dcSlot];

        if (!pDC->isCompute && !pDC->FeLock)
        {
            uint32_t initial = InterlockedCompareExchange((volatile uint32_t*)&pDC->FeLock, 1, 0);
            if (initial == 0)
            {
                // successfully grabbed the DC, now run the FE
                pDC->FeWork.pfnWork(pContext, pDC, workerId, &pDC->FeWork.desc);

                CompleteDrawFE(pContext, pDC);
            }
        }
        curDraw++;
    }
}
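
// The InterlockedCompareExchange above is the entire FE scheduler: whichever
// worker flips FeLock from 0 to 1 owns that draw's front-end work. The same
// idiom with std::atomic, shown only as a sketch (this file uses the OS
// intrinsic, not std::atomic):
//
//     std::atomic<uint32_t> feLock{0};
//     uint32_t expected = 0;
//     if (feLock.compare_exchange_strong(expected, 1))
//     {
//         // we won the race; run the FE for this draw
//     }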

//////////////////////////////////////////////////////////////////////////
/// @brief If there is any compute work then go work on it.
/// @param pContext - pointer to SWR context.
/// @param workerId - The unique worker ID that is assigned to this thread.
/// @param curDrawBE - This tracks the draw contexts that this thread has processed. Each worker thread
///                    has its own curDrawBE counter and this ensures that each worker processes all the
///                    draws in order.
void WorkOnCompute(
    SWR_CONTEXT *pContext,
    uint32_t workerId,
    uint32_t &curDrawBE)
{
    uint32_t drawEnqueued = 0;
    if (FindFirstIncompleteDraw(pContext, curDrawBE, drawEnqueued) == false)
    {
        return;
    }

    uint32_t lastRetiredDraw = pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT].drawId - 1;

    for (uint64_t i = curDrawBE; IDComparesLess(i, drawEnqueued); ++i)
    {
        DRAW_CONTEXT *pDC = &pContext->dcRing[i % KNOB_MAX_DRAWS_IN_FLIGHT];
        if (pDC->isCompute == false) return;

        // check dependencies
        if (CheckDependency(pContext, pDC, lastRetiredDraw))
        {
            return;
        }

        SWR_ASSERT(pDC->pDispatch != nullptr);
        DispatchQueue& queue = *pDC->pDispatch;

        // Is there any work remaining?
        if (queue.getNumQueued() > 0)
        {
            void* pSpillFillBuffer = nullptr;
            uint32_t threadGroupId = 0;
            while (queue.getWork(threadGroupId))
            {
                queue.dispatch(pDC, workerId, threadGroupId, pSpillFillBuffer);
                queue.finishedWork();
            }

            // Ensure all streaming writes are globally visible before moving onto the next draw
            _mm_mfence();
        }
    }
}

template<bool IsFEThread, bool IsBEThread>
DWORD workerThreadMain(LPVOID pData)
{
    THREAD_DATA *pThreadData = (THREAD_DATA*)pData;
    SWR_CONTEXT *pContext = pThreadData->pContext;
    uint32_t threadId = pThreadData->threadId;
    uint32_t workerId = pThreadData->workerId;

    bindThread(pContext, threadId, pThreadData->procGroupId, pThreadData->forceBindProcGroup);

    RDTSC_INIT(threadId);

    uint32_t numaNode = pThreadData->numaId;
    uint32_t numaMask = pContext->threadPool.numaMask;

    // flush denormals to 0
    _mm_setcsr(_mm_getcsr() | _MM_FLUSH_ZERO_ON | _MM_DENORMALS_ZERO_ON);

    // Track tiles locked by other threads. If we try to lock a macrotile and find it's already
    // locked then we'll add it to this list so that we don't try and lock it again.
    TileSet lockedTiles;

    // Each worker has the ability to work on any of the queued draws as long as certain
    // conditions are met. The data associated with a draw is guaranteed to be active as
    // long as a worker hasn't signaled that it has moved on to the next draw when it
    // determines there is no more work to do. The API thread will not increment the head
    // of the dc ring until all workers have moved past the current head.
    // The logic to determine what to work on is:
    // 1- Try to work on the FE of any draw that is queued. For now there are no dependencies
    //    on the FE work, so any worker can grab any FE and process in parallel. Eventually
    //    we'll need dependency tracking to force serialization on FEs. The worker will try
    //    to pick an FE by atomically incrementing a counter in the swr context. It'll keep
    //    trying until it reaches the tail.
    // 2- BE work must be done in strict order. We accomplish this today by pulling work off
    //    the oldest draw (i.e. the head) of the dcRing. The worker can determine if there is
    //    any work left by comparing the total # of binned work items and the total # of completed
    //    work items. If they are equal, then there is no more work to do for this draw, and
    //    the worker can safely increment its oldestDraw counter and move on to the next draw.
    std::unique_lock<std::mutex> lock(pContext->WaitLock, std::defer_lock);

    auto threadHasWork = [&](uint32_t curDraw) { return curDraw != pContext->dcRing.GetHead(); };

    uint32_t curDrawBE = 0;
    uint32_t curDrawFE = 0;

    bool bShutdown = false;

    while (true)
    {
        if (bShutdown && !threadHasWork(curDrawBE))
        {
            break;
        }

        uint32_t loop = 0;
        while (loop++ < KNOB_WORKER_SPIN_LOOP_COUNT && !threadHasWork(curDrawBE))
        {
            _mm_pause();
        }

        if (!threadHasWork(curDrawBE))
        {
            lock.lock();

            // check for thread idle condition again under lock
            if (threadHasWork(curDrawBE))
            {
                lock.unlock();
            }
            else
            {
                AR_BEGIN(WorkerWaitForThreadEvent, 0);

                pContext->FifosNotEmpty.wait(lock);
                lock.unlock();

                AR_END(WorkerWaitForThreadEvent, 0);
            }
        }

        if (IsBEThread)
        {
            AR_BEGIN(WorkerWorkOnFifoBE, 0);
            bShutdown |= WorkOnFifoBE(pContext, workerId, curDrawBE, lockedTiles, numaNode, numaMask);
            AR_END(WorkerWorkOnFifoBE, 0);

            WorkOnCompute(pContext, workerId, curDrawBE);
        }

        if (IsFEThread)
        {
            WorkOnFifoFE(pContext, workerId, curDrawFE);

            if (!IsBEThread)
            {
                curDrawBE = curDrawFE;
            }
        }
    }

    return 0;
}
template<> DWORD workerThreadMain<false, false>(LPVOID) = delete;
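
// The <IsFEThread, IsBEThread> flags let one thread body serve dedicated FE
// workers (<true, false>), dedicated BE workers (<false, true>), or combined
// workers (<true, true>, the only variant StartThreadPool launches below).
// A worker that did neither would spin forever, hence the deleted
// <false, false> specialization. Hypothetical split-pool usage:
//
//     new std::thread(workerThreadInit<true, false>, &feData);  // FE-only
//     new std::thread(workerThreadInit<false, true>, &beData);  // BE-only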

template <bool IsFEThread, bool IsBEThread>
DWORD workerThreadInit(LPVOID pData)
{
#if defined(_MSC_VER)
    __try
#endif // _MSC_VER
    {
        return workerThreadMain<IsFEThread, IsBEThread>(pData);
    }
#if defined(_MSC_VER)
    __except(EXCEPTION_CONTINUE_SEARCH)
    {
    }
#endif // _MSC_VER

    return 1;
}

template<> DWORD workerThreadInit<false, false>(LPVOID pData) = delete;

//////////////////////////////////////////////////////////////////////////
/// @brief Creates thread pool info but doesn't launch threads.
/// @param pContext - pointer to context
/// @param pPool - pointer to thread pool object.
void CreateThreadPool(SWR_CONTEXT* pContext, THREAD_POOL* pPool)
{
    // Bind application thread to HW thread 0
    bindThread(pContext, 0);

    CPUNumaNodes nodes;
    uint32_t numThreadsPerProcGroup = 0;
    CalculateProcessorTopology(nodes, numThreadsPerProcGroup);

    uint32_t numHWNodes        = (uint32_t)nodes.size();
    uint32_t numHWCoresPerNode = (uint32_t)nodes[0].cores.size();
    uint32_t numHWHyperThreads = (uint32_t)nodes[0].cores[0].threadIds.size();

    // Calculate num HW threads. Due to asymmetric topologies, this is not
    // a trivial multiplication.
    uint32_t numHWThreads = 0;
    for (auto& node : nodes)
    {
        for (auto& core : node.cores)
        {
            numHWThreads += (uint32_t)core.threadIds.size();
        }
    }

    uint32_t numNodes        = numHWNodes;
    uint32_t numCoresPerNode = numHWCoresPerNode;
    uint32_t numHyperThreads = numHWHyperThreads;

    if (pContext->threadInfo.MAX_NUMA_NODES)
    {
        numNodes = std::min(numNodes, pContext->threadInfo.MAX_NUMA_NODES);
    }

    if (pContext->threadInfo.MAX_CORES_PER_NUMA_NODE)
    {
        numCoresPerNode = std::min(numCoresPerNode, pContext->threadInfo.MAX_CORES_PER_NUMA_NODE);
    }

    if (pContext->threadInfo.MAX_THREADS_PER_CORE)
    {
        numHyperThreads = std::min(numHyperThreads, pContext->threadInfo.MAX_THREADS_PER_CORE);
    }

#if defined(_WIN32) && !defined(_WIN64)
    if (!pContext->threadInfo.MAX_WORKER_THREADS)
    {
        // Limit 32-bit windows to bindable HW threads only
        if ((numCoresPerNode * numHWHyperThreads) > 32)
        {
            numCoresPerNode = 32 / numHWHyperThreads;
        }
    }
#endif

    // Calculate numThreads
    uint32_t numThreads = numNodes * numCoresPerNode * numHyperThreads;
    numThreads = std::min(numThreads, numHWThreads);

    if (pContext->threadInfo.MAX_WORKER_THREADS)
    {
        uint32_t maxHWThreads = numHWNodes * numHWCoresPerNode * numHWHyperThreads;
        numThreads = std::min(pContext->threadInfo.MAX_WORKER_THREADS, maxHWThreads);
    }

    uint32_t numAPIReservedThreads = 1;

    if (numThreads == 1)
    {
        // If there is only 1 worker thread, try to move it to an available
        // HW thread. If that fails, use the API thread.
        if (numCoresPerNode < numHWCoresPerNode)
        {
            numCoresPerNode++;
        }
        else if (numHyperThreads < numHWHyperThreads)
        {
            numHyperThreads++;
        }
        else if (numNodes < numHWNodes)
        {
            numNodes++;
        }
        else
        {
            pContext->threadInfo.SINGLE_THREADED = true;
        }
    }
    else
    {
        // Save HW threads for the API if we can
        if (numThreads > numAPIReservedThreads)
        {
            numThreads -= numAPIReservedThreads;
        }
        else
        {
            numAPIReservedThreads = 0;
        }
    }

    if (pContext->threadInfo.SINGLE_THREADED)
    {
        numThreads = 1;
    }

    // Initialize DRAW_CONTEXT's per-thread stats
    for (uint32_t dc = 0; dc < KNOB_MAX_DRAWS_IN_FLIGHT; ++dc)
    {
        pContext->dcRing[dc].dynState.pStats = new SWR_STATS[numThreads];
        memset(pContext->dcRing[dc].dynState.pStats, 0, sizeof(SWR_STATS) * numThreads);
    }

    if (pContext->threadInfo.SINGLE_THREADED)
    {
        pContext->NumWorkerThreads = 1;
        pContext->NumFEThreads = 1;
        pContext->NumBEThreads = 1;
        pPool->numThreads = 0;

        return;
    }

    pPool->numThreads = numThreads;
    pContext->NumWorkerThreads = pPool->numThreads;

    pPool->pThreadData = (THREAD_DATA*)malloc(pPool->numThreads * sizeof(THREAD_DATA));
    pPool->numaMask = 0;

    pPool->pThreads = new THREAD_PTR[pPool->numThreads];

    if (pContext->threadInfo.MAX_WORKER_THREADS)
    {
        bool bForceBindProcGroup = (numThreads > numThreadsPerProcGroup);
        uint32_t numProcGroups = (numThreads + numThreadsPerProcGroup - 1) / numThreadsPerProcGroup;
        // When MAX_WORKER_THREADS is set we don't bother to bind to specific HW threads,
        // but Windows will still require binding to specific process groups
        for (uint32_t workerId = 0; workerId < numThreads; ++workerId)
        {
            pPool->pThreadData[workerId].workerId = workerId;
            pPool->pThreadData[workerId].procGroupId = workerId % numProcGroups;
            pPool->pThreadData[workerId].threadId = 0;
            pPool->pThreadData[workerId].numaId = 0;
            pPool->pThreadData[workerId].coreId = 0;
            pPool->pThreadData[workerId].htId = 0;
            pPool->pThreadData[workerId].pContext = pContext;
            pPool->pThreadData[workerId].forceBindProcGroup = bForceBindProcGroup;

            pContext->NumBEThreads++;
            pContext->NumFEThreads++;
        }
    }
    else
    {
        pPool->numaMask = numNodes - 1; // Only works for 2**n numa nodes (1, 2, 4, etc.)

        uint32_t workerId = 0;
        for (uint32_t n = 0; n < numNodes; ++n)
        {
            auto& node = nodes[n];
            uint32_t numCores = numCoresPerNode;
            for (uint32_t c = 0; c < numCores; ++c)
            {
                if (c >= node.cores.size())
                {
                    break;
                }

                auto& core = node.cores[c];
                for (uint32_t t = 0; t < numHyperThreads; ++t)
                {
                    if (t >= core.threadIds.size())
                    {
                        break;
                    }

                    if (numAPIReservedThreads)
                    {
                        --numAPIReservedThreads;
                        continue;
                    }

                    SWR_ASSERT(workerId < numThreads);

                    pPool->pThreadData[workerId].workerId = workerId;
                    pPool->pThreadData[workerId].procGroupId = core.procGroup;
                    pPool->pThreadData[workerId].threadId = core.threadIds[t];
                    pPool->pThreadData[workerId].numaId = n;
                    pPool->pThreadData[workerId].coreId = c;
                    pPool->pThreadData[workerId].htId = t;
                    pPool->pThreadData[workerId].pContext = pContext;

                    pContext->NumBEThreads++;
                    pContext->NumFEThreads++;

                    ++workerId;
                }
            }
        }
        SWR_ASSERT(workerId == pContext->NumWorkerThreads);
    }
}
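
// Worked example of the assignment above (hypothetical machine): 2 NUMA nodes
// x 2 cores x 2 hyperthreads with no knob limits gives numThreads = 8, minus
// numAPIReservedThreads = 1, so 7 workers. The loop skips the first HW thread
// (node 0 / core 0 / ht 0) for the API thread, so worker 0 lands on node 0 /
// core 0 / ht 1, worker 1 on node 0 / core 1 / ht 0, and so on across both
// nodes until workerId == 7 == pContext->NumWorkerThreads.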

//////////////////////////////////////////////////////////////////////////
/// @brief Launches worker threads in thread pool.
/// @param pContext - pointer to context
/// @param pPool - pointer to thread pool object.
void StartThreadPool(SWR_CONTEXT* pContext, THREAD_POOL* pPool)
{
    if (pContext->threadInfo.SINGLE_THREADED)
    {
        return;
    }

    for (uint32_t workerId = 0; workerId < pContext->NumWorkerThreads; ++workerId)
    {
        pPool->pThreads[workerId] = new std::thread(workerThreadInit<true, true>, &pPool->pThreadData[workerId]);
    }
}
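
// Typical lifecycle of the pool (sketch):
//
//     CreateThreadPool(pContext, &pContext->threadPool);   // topology + THREAD_DATA setup
//     StartThreadPool(pContext, &pContext->threadPool);    // launch std::threads
//     // ... render ...
//     DestroyThreadPool(pContext, &pContext->threadPool);  // wait for idle, detach, free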

//////////////////////////////////////////////////////////////////////////
/// @brief Destroys thread pool.
/// @param pContext - pointer to context
/// @param pPool - pointer to thread pool object.
void DestroyThreadPool(SWR_CONTEXT *pContext, THREAD_POOL *pPool)
{
    if (!pContext->threadInfo.SINGLE_THREADED)
    {
        // Wait for all threads to finish
        SwrWaitForIdle(pContext);

        // Wait for threads to finish and destroy them
        for (uint32_t t = 0; t < pPool->numThreads; ++t)
        {
            // Detach from thread. Cannot join() due to possibility (in Windows) of code
            // in some DLLMain (THREAD_DETACH case) blocking the thread until after this returns.
            pPool->pThreads[t]->detach();
            delete(pPool->pThreads[t]);
        }

        delete [] pPool->pThreads;

        // Clean up data used by threads
        free(pPool->pThreadData);
    }
}