// mesa.git @ e03632b443bfb8434baa7beb43e455be7c983727 -- src/gallium/drivers/swr/rasterizer/core/threads.cpp
1 /****************************************************************************
2 * Copyright (C) 2014-2016 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 ****************************************************************************/
23
24 #include <stdio.h>
25 #include <thread>
26 #include <algorithm>
27 #include <float.h>
28 #include <vector>
29 #include <utility>
30 #include <fstream>
31 #include <string>
32
33 #if defined(__linux__) || defined(__gnu_linux__)
34 #include <pthread.h>
35 #include <sched.h>
36 #include <unistd.h>
37 #endif
38
39 #include "common/os.h"
40 #include "context.h"
41 #include "frontend.h"
42 #include "backend.h"
43 #include "rasterizer.h"
44 #include "rdtsc_core.h"
45 #include "tilemgr.h"
46
47
48
49
50 // ThreadId
51 struct Core
52 {
53 uint32_t procGroup = 0;
54 std::vector<uint32_t> threadIds;
55 };
56
57 struct NumaNode
58 {
59 uint32_t numaId;
60 std::vector<Core> cores;
61 };
62
63 typedef std::vector<NumaNode> CPUNumaNodes;
64
65 void CalculateProcessorTopology(CPUNumaNodes& out_nodes, uint32_t& out_numThreadsPerProcGroup)
66 {
67 out_nodes.clear();
68 out_numThreadsPerProcGroup = 0;
69
70 #if defined(_WIN32)
71
72 std::vector<KAFFINITY> threadMaskPerProcGroup;
73
74 static std::mutex m;
75 std::lock_guard<std::mutex> l(m);
76
77 DWORD bufSize = 0;
78
79 BOOL ret = GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &bufSize);
80 SWR_ASSERT(ret == FALSE && GetLastError() == ERROR_INSUFFICIENT_BUFFER);
81
82 PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBufferMem = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)malloc(bufSize);
83 SWR_ASSERT(pBufferMem);
84
85 ret = GetLogicalProcessorInformationEx(RelationProcessorCore, pBufferMem, &bufSize);
86 SWR_ASSERT(ret != FALSE, "Failed to get Processor Topology Information");
87
88 uint32_t count = bufSize / pBufferMem->Size;
89 PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBuffer = pBufferMem;
90
91 for (uint32_t i = 0; i < count; ++i)
92 {
93 SWR_ASSERT(pBuffer->Relationship == RelationProcessorCore);
94 for (uint32_t g = 0; g < pBuffer->Processor.GroupCount; ++g)
95 {
96 auto& gmask = pBuffer->Processor.GroupMask[g];
97 uint32_t threadId = 0;
98 uint32_t procGroup = gmask.Group;
99
100 Core* pCore = nullptr;
101
102 uint32_t numThreads = (uint32_t)_mm_popcount_sizeT(gmask.Mask);
103
104 while (BitScanForwardSizeT((unsigned long*)&threadId, gmask.Mask))
105 {
106 // clear mask
107 KAFFINITY threadMask = KAFFINITY(1) << threadId;
108 gmask.Mask &= ~threadMask;
109
110 if (procGroup >= threadMaskPerProcGroup.size())
111 {
112 threadMaskPerProcGroup.resize(procGroup + 1);
113 }
114
115 if (threadMaskPerProcGroup[procGroup] & threadMask)
116 {
117 // Already seen this mask. This means that we are in 32-bit mode and
118 // have seen more than 32 HW threads for this procGroup
119 // Don't use it
120 #if defined(_WIN64)
121 SWR_INVALID("Shouldn't get here in 64-bit mode");
122 #endif
123 continue;
124 }
125
126 threadMaskPerProcGroup[procGroup] |= (KAFFINITY(1) << threadId);
127
128 // Find Numa Node
129 uint32_t numaId = 0;
130 PROCESSOR_NUMBER procNum = {};
131 procNum.Group = WORD(procGroup);
132 procNum.Number = UCHAR(threadId);
133
134 ret = GetNumaProcessorNodeEx(&procNum, (PUSHORT)&numaId);
135 SWR_ASSERT(ret);
136
137 // Store data
138 if (out_nodes.size() <= numaId)
139 {
140 out_nodes.resize(numaId + 1);
141 }
142 auto& numaNode = out_nodes[numaId];
143 numaNode.numaId = numaId;
144
145 uint32_t coreId = 0;
146
147 if (nullptr == pCore)
148 {
149 numaNode.cores.push_back(Core());
150 pCore = &numaNode.cores.back();
151 pCore->procGroup = procGroup;
152 }
153 pCore->threadIds.push_back(threadId);
154 if (procGroup == 0)
155 {
156 out_numThreadsPerProcGroup++;
157 }
158 }
159 }
160 pBuffer = PtrAdd(pBuffer, pBuffer->Size);
161 }
162
163 free(pBufferMem);
164
165
166 #elif defined(__linux__) || defined (__gnu_linux__)
167
168 // Parse /proc/cpuinfo to get full topology
169 std::ifstream input("/proc/cpuinfo");
170 std::string line;
171 char* c;
172 uint32_t threadId = uint32_t(-1);
173 uint32_t coreId = uint32_t(-1);
174 uint32_t numaId = uint32_t(-1);
175
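// Illustrative /proc/cpuinfo stanza (values vary per machine) and the fields parsed below:
//   processor   : 3   -> threadId = 3
//   core id     : 1   -> coreId   = 1
//   physical id : 0   -> numaId   = 0
// Each new "processor" line saves the ids gathered for the previous stanza before parsing the next.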
176 while (std::getline(input, line))
177 {
178 if (line.find("processor") != std::string::npos)
179 {
180 if (threadId != uint32_t(-1))
181 {
182 // Save information.
183 if (out_nodes.size() <= numaId)
184 {
185 out_nodes.resize(numaId + 1);
186 }
187
188 auto& numaNode = out_nodes[numaId];
numaNode.numaId = numaId;
189 if (numaNode.cores.size() <= coreId)
190 {
191 numaNode.cores.resize(coreId + 1);
192 }
193
194 auto& core = numaNode.cores[coreId];
195 core.procGroup = coreId;
196 core.threadIds.push_back(threadId);
197
198 out_numThreadsPerProcGroup++;
199 }
200
201 auto data_start = line.find(": ") + 2;
202 threadId = std::strtoul(&line.c_str()[data_start], &c, 10);
203 continue;
204 }
205 if (line.find("core id") != std::string::npos)
206 {
207 auto data_start = line.find(": ") + 2;
208 coreId = std::strtoul(&line.c_str()[data_start], &c, 10);
209 continue;
210 }
211 if (line.find("physical id") != std::string::npos)
212 {
213 auto data_start = line.find(": ") + 2;
214 numaId = std::strtoul(&line.c_str()[data_start], &c, 10);
215 continue;
216 }
217 }
218
219 if (threadId != uint32_t(-1))
220 {
221 // Save information.
222 if (out_nodes.size() <= numaId)
223 {
224 out_nodes.resize(numaId + 1);
225 }
226 auto& numaNode = out_nodes[numaId];
227 numaNode.numaId = numaId;
228 if (numaNode.cores.size() <= coreId)
229 {
230 numaNode.cores.resize(coreId + 1);
231 }
232 auto& core = numaNode.cores[coreId];
233
234 core.procGroup = coreId;
235 core.threadIds.push_back(threadId);
236 out_numThreadsPerProcGroup++;
237 }
238
239 #else
240
241 #error Unsupported platform
242
243 #endif
244
245 // Prune empty cores and numa nodes
246 for (auto node_it = out_nodes.begin(); node_it != out_nodes.end(); )
247 {
248 // Erase empty cores (first)
249 for (auto core_it = node_it->cores.begin(); core_it != node_it->cores.end(); )
250 {
251 if (core_it->threadIds.size() == 0)
252 {
253 core_it = node_it->cores.erase(core_it);
254 }
255 else
256 {
257 ++core_it;
258 }
259 }
260
261 // Erase empty numa nodes (second)
262 if (node_it->cores.size() == 0)
263 {
264 node_it = out_nodes.erase(node_it);
265 }
266 else
267 {
268 ++node_it;
269 }
270 }
271 }
272
273
274 void bindThread(SWR_CONTEXT* pContext, uint32_t threadId, uint32_t procGroupId = 0, bool bindProcGroup=false)
275 {
276 // Only bind threads when MAX_WORKER_THREADS isn't set.
277 if (pContext->threadInfo.SINGLE_THREADED || (pContext->threadInfo.MAX_WORKER_THREADS && bindProcGroup == false))
278 {
279 return;
280 }
281
282 #if defined(_WIN32)
283
284 GROUP_AFFINITY affinity = {};
285 affinity.Group = procGroupId;
286
287 #if !defined(_WIN64)
288 if (threadId >= 32)
289 {
290 // Hopefully we don't get here. Logic in CreateThreadPool should prevent this.
291 SWR_INVALID("Shouldn't get here");
292
293 // In a 32-bit process on Windows it is impossible to bind
294 // to logical processors 32-63 within a processor group.
295 // In this case set the mask to 0 and let the system assign
296 // the processor. Hopefully it will make smart choices.
297 affinity.Mask = 0;
298 }
299 else
300 #endif
301 {
302 // If MAX_WORKER_THREADS is set, only bind to the proc group,
303 // not the individual HW thread.
304 if (!pContext->threadInfo.MAX_WORKER_THREADS)
305 {
306 affinity.Mask = KAFFINITY(1) << threadId;
307 }
308 }
309
310 SetThreadGroupAffinity(GetCurrentThread(), &affinity, nullptr);
311
312 #else
313
314 cpu_set_t cpuset;
315 pthread_t thread = pthread_self();
316 CPU_ZERO(&cpuset);
317 CPU_SET(threadId, &cpuset);
318
319 pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
320
321 #endif
322 }
323
324 INLINE
325 uint32_t GetEnqueuedDraw(SWR_CONTEXT *pContext)
326 {
327 return pContext->dcRing.GetHead();
328 }
329
330 INLINE
331 DRAW_CONTEXT *GetDC(SWR_CONTEXT *pContext, uint32_t drawId)
332 {
333 return &pContext->dcRing[(drawId-1) % KNOB_MAX_DRAWS_IN_FLIGHT];
334 }
335
336 INLINE
337 bool IDComparesLess(uint32_t a, uint32_t b)
338 {
339 // Use signed delta to ensure that wrap-around to 0 is correctly handled.
340 int32_t delta = int32_t(a - b);
341 return (delta < 0);
342 }
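// Worked example (illustrative): IDComparesLess(0xFFFFFFFE, 2) computes delta = int32_t(0xFFFFFFFC) = -4 < 0,
// so a draw id just below the 32-bit wrap point still compares as older than an id just past it.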
343
344 // Returns true if the dependency is not yet met.
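// e.g. (illustrative) a dependent draw with drawId 7 waits on draw 6 retiring: this returns true
// while lastRetiredDraw is still less than 6, and the caller must back off.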
345 INLINE
346 bool CheckDependency(SWR_CONTEXT *pContext, DRAW_CONTEXT *pDC, uint32_t lastRetiredDraw)
347 {
348 return pDC->dependent && IDComparesLess(lastRetiredDraw, pDC->drawId - 1);
349 }
350
351 bool CheckDependencyFE(SWR_CONTEXT *pContext, DRAW_CONTEXT *pDC, uint32_t lastRetiredDraw)
352 {
353 return pDC->dependentFE && IDComparesLess(lastRetiredDraw, pDC->drawId - 1);
354 }
355
356 //////////////////////////////////////////////////////////////////////////
357 /// @brief Update client stats.
358 INLINE void UpdateClientStats(SWR_CONTEXT* pContext, uint32_t workerId, DRAW_CONTEXT* pDC)
359 {
360 if ((pContext->pfnUpdateStats == nullptr) || (GetApiState(pDC).enableStatsBE == false))
361 {
362 return;
363 }
364
365 DRAW_DYNAMIC_STATE& dynState = pDC->dynState;
366 SWR_STATS stats{ 0 };
367
368 // Sum up stats across all workers before sending to client.
369 for (uint32_t i = 0; i < pContext->NumWorkerThreads; ++i)
370 {
371 stats.DepthPassCount += dynState.pStats[i].DepthPassCount;
372
373 stats.PsInvocations += dynState.pStats[i].PsInvocations;
374 stats.CsInvocations += dynState.pStats[i].CsInvocations;
375 }
376
377
378 pContext->pfnUpdateStats(GetPrivateState(pDC), &stats);
379 }
380
381 INLINE void ExecuteCallbacks(SWR_CONTEXT* pContext, uint32_t workerId, DRAW_CONTEXT* pDC)
382 {
383 UpdateClientStats(pContext, workerId, pDC);
384
385 if (pDC->retireCallback.pfnCallbackFunc)
386 {
387 pDC->retireCallback.pfnCallbackFunc(pDC->retireCallback.userData,
388 pDC->retireCallback.userData2,
389 pDC->retireCallback.userData3);
390 }
391 }
392
393 // inlined-only version
394 INLINE int32_t CompleteDrawContextInl(SWR_CONTEXT* pContext, uint32_t workerId, DRAW_CONTEXT* pDC)
395 {
396 int32_t result = InterlockedDecrement((volatile LONG*)&pDC->threadsDone);
397 SWR_ASSERT(result >= 0);
398
399 AR_FLUSH(pDC->drawId);
400
401 if (result == 0)
402 {
403 ExecuteCallbacks(pContext, workerId, pDC);
404
405 // Cleanup memory allocations
406 pDC->pArena->Reset(true);
407 if (!pDC->isCompute)
408 {
409 pDC->pTileMgr->initialize();
410 }
411 if (pDC->cleanupState)
412 {
413 pDC->pState->pArena->Reset(true);
414 }
415
416 _ReadWriteBarrier();
417
418 pContext->dcRing.Dequeue(); // Remove from tail
419 }
420
421 return result;
422 }
423
424 // available to other translation modules
425 int32_t CompleteDrawContext(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
426 {
427 return CompleteDrawContextInl(pContext, 0, pDC);
428 }
429
430 INLINE bool FindFirstIncompleteDraw(SWR_CONTEXT* pContext, uint32_t workerId, uint32_t& curDrawBE, uint32_t& drawEnqueued)
431 {
432 // increment our current draw id to the first incomplete draw
433 drawEnqueued = GetEnqueuedDraw(pContext);
434 while (IDComparesLess(curDrawBE, drawEnqueued))
435 {
436 DRAW_CONTEXT *pDC = &pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT];
437
438 // If it's not compute and the FE is not done, break out of the loop.
439 if (!pDC->doneFE && !pDC->isCompute) break;
440
441 bool isWorkComplete = pDC->isCompute ?
442 pDC->pDispatch->isWorkComplete() :
443 pDC->pTileMgr->isWorkComplete();
444
445 if (isWorkComplete)
446 {
447 curDrawBE++;
448 CompleteDrawContextInl(pContext, workerId, pDC);
449 }
450 else
451 {
452 break;
453 }
454 }
455
456 // If there are no more incomplete draws then return false.
457 return IDComparesLess(curDrawBE, drawEnqueued);
458 }
459
460 //////////////////////////////////////////////////////////////////////////
461 /// @brief If there is any BE work then go work on it.
462 /// @param pContext - pointer to SWR context.
463 /// @param workerId - The unique worker ID that is assigned to this thread.
464 /// @param curDrawBE - This tracks the draw contexts that this thread has processed. Each worker thread
465 /// has its own curDrawBE counter and this ensures that each worker processes all the
466 /// draws in order.
467 /// @param lockedTiles - This is the set of tiles locked by other threads. Each thread maintains its
468 /// own set, and each time it fails to lock a macrotile because it's already locked,
469 /// it adds that tile to the lockedTiles set. As a worker begins to work
470 /// on future draws, lockedTiles ensures that it doesn't work on tiles that may
471 /// still have work pending in a previous draw. Additionally, lockedTiles is a
472 /// heuristic that can steer a worker back to the same macrotile that it had been
473 /// working on in a previous draw.
474 /// @returns true if worker thread should shutdown
475 bool WorkOnFifoBE(
476 SWR_CONTEXT *pContext,
477 uint32_t workerId,
478 uint32_t &curDrawBE,
479 TileSet& lockedTiles,
480 uint32_t numaNode,
481 uint32_t numaMask)
482 {
483 bool bShutdown = false;
484
485 // Find the first incomplete draw that has pending work. If no such draw is found then
486 // return. FindFirstIncompleteDraw is responsible for incrementing the curDrawBE.
487 uint32_t drawEnqueued = 0;
488 if (FindFirstIncompleteDraw(pContext, workerId, curDrawBE, drawEnqueued) == false)
489 {
490 return false;
491 }
492
493 uint32_t lastRetiredDraw = pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT].drawId - 1;
494
495 // Reset our history for locked tiles. We'll have to re-learn which tiles are locked.
496 lockedTiles.clear();
497
498 // Try to work on each draw in order of the available draws in flight.
499 // 1. If we're on curDrawBE, we can work on any macrotile that is available.
500 // 2. If we're trying to work on draws after curDrawBE, we are restricted to
501 // working on those macrotiles that are known to be complete in the prior draw to
502 // maintain order. The locked tiles provide the history to ensure this.
503 for (uint32_t i = curDrawBE; IDComparesLess(i, drawEnqueued); ++i)
504 {
505 DRAW_CONTEXT *pDC = &pContext->dcRing[i % KNOB_MAX_DRAWS_IN_FLIGHT];
506
507 if (pDC->isCompute) return false; // We don't look at compute work.
508
509 // First wait for FE to be finished with this draw. This keeps the threading model simple,
510 // but if there are lots of bubbles between draws then serializing FE and BE may
511 // need to be revisited.
512 if (!pDC->doneFE) return false;
513
514 // If this draw is dependent on a previous draw then we need to bail.
515 if (CheckDependency(pContext, pDC, lastRetiredDraw))
516 {
517 return false;
518 }
519
520 // Grab the list of all dirty macrotiles. A tile is dirty if it has work queued to it.
521 auto &macroTiles = pDC->pTileMgr->getDirtyTiles();
522
523 for (auto tile : macroTiles)
524 {
525 uint32_t tileID = tile->mId;
526
527 // Only work on tiles for this numa node
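// (Illustrative) Macrotiles are striped across NUMA nodes by (x ^ y) & numaMask: with 2 nodes,
// numaMask == 1, so tile (2, 3) hashes to (2 ^ 3) & 1 == 1 and is only processed by node 1 workers.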
528 uint32_t x, y;
529 pDC->pTileMgr->getTileIndices(tileID, x, y);
530 if (((x ^ y) & numaMask) != numaNode)
531 {
532 continue;
533 }
534
535 if (!tile->getNumQueued())
536 {
537 continue;
538 }
539
540 // can only work on this draw if it's not in use by other threads
541 if (lockedTiles.find(tileID) != lockedTiles.end())
542 {
543 continue;
544 }
545
546 if (tile->tryLock())
547 {
548 BE_WORK *pWork;
549
550 AR_BEGIN(WorkerFoundWork, pDC->drawId);
551
552 uint32_t numWorkItems = tile->getNumQueued();
553 SWR_ASSERT(numWorkItems);
554
555 pWork = tile->peek();
556 SWR_ASSERT(pWork);
557 if (pWork->type == DRAW)
558 {
559 pContext->pHotTileMgr->InitializeHotTiles(pContext, pDC, workerId, tileID);
560 }
561 else if (pWork->type == SHUTDOWN)
562 {
563 bShutdown = true;
564 }
565
566 while ((pWork = tile->peek()) != nullptr)
567 {
568 pWork->pfnWork(pDC, workerId, tileID, &pWork->desc);
569 tile->dequeue();
570 }
571 AR_END(WorkerFoundWork, numWorkItems);
572
573 _ReadWriteBarrier();
574
575 pDC->pTileMgr->markTileComplete(tileID);
576
577 // Optimization: If the draw is complete and we're the last one to have worked on it then
578 // we can reset the locked list, since all draws before the next one are guaranteed to be complete.
579 if ((curDrawBE == i) && (bShutdown || pDC->pTileMgr->isWorkComplete()))
580 {
581 // We can increment the current BE and safely move to next draw since we know this draw is complete.
582 curDrawBE++;
583 CompleteDrawContextInl(pContext, workerId, pDC);
584
585 lastRetiredDraw++;
586
587 lockedTiles.clear();
588 break;
589 }
590
591 if (bShutdown)
592 {
593 break;
594 }
595 }
596 else
597 {
598 // This tile is already locked. So let's add it to our locked tiles set. This way we don't try locking this one again.
599 lockedTiles.insert(tileID);
600 }
601 }
602 }
603
604 return bShutdown;
605 }
606
607 //////////////////////////////////////////////////////////////////////////
608 /// @brief Called when FE work is complete for this DC.
609 INLINE void CompleteDrawFE(SWR_CONTEXT* pContext, uint32_t workerId, DRAW_CONTEXT* pDC)
610 {
611 if (pContext->pfnUpdateStatsFE && GetApiState(pDC).enableStatsFE)
612 {
613 SWR_STATS_FE& stats = pDC->dynState.statsFE;
614
615 AR_EVENT(FrontendStatsEvent(pDC->drawId,
616 stats.IaVertices, stats.IaPrimitives, stats.VsInvocations, stats.HsInvocations,
617 stats.DsInvocations, stats.GsInvocations, stats.GsPrimitives, stats.CInvocations, stats.CPrimitives,
618 stats.SoPrimStorageNeeded[0], stats.SoPrimStorageNeeded[1], stats.SoPrimStorageNeeded[2], stats.SoPrimStorageNeeded[3],
619 stats.SoNumPrimsWritten[0], stats.SoNumPrimsWritten[1], stats.SoNumPrimsWritten[2], stats.SoNumPrimsWritten[3]
620 ));
621 AR_EVENT(FrontendDrawEndEvent(pDC->drawId));
622
623 pContext->pfnUpdateStatsFE(GetPrivateState(pDC), &stats);
624 }
625
626 if (pContext->pfnUpdateSoWriteOffset)
627 {
628 for (uint32_t i = 0; i < MAX_SO_BUFFERS; ++i)
629 {
630 if ((pDC->dynState.SoWriteOffsetDirty[i]) &&
631 (pDC->pState->state.soBuffer[i].soWriteEnable))
632 {
633 pContext->pfnUpdateSoWriteOffset(GetPrivateState(pDC), i, pDC->dynState.SoWriteOffset[i]);
634 }
635 }
636 }
637
638 // Ensure all streaming writes are globally visible before marking this FE done
639 _mm_mfence();
640 pDC->doneFE = true;
641
642 InterlockedDecrement((volatile LONG*)&pContext->drawsOutstandingFE);
643 }
644
645 void WorkOnFifoFE(SWR_CONTEXT *pContext, uint32_t workerId, uint32_t &curDrawFE)
646 {
647 // Try to grab the next DC from the ring
648 uint32_t drawEnqueued = GetEnqueuedDraw(pContext);
649 while (IDComparesLess(curDrawFE, drawEnqueued))
650 {
651 uint32_t dcSlot = curDrawFE % KNOB_MAX_DRAWS_IN_FLIGHT;
652 DRAW_CONTEXT *pDC = &pContext->dcRing[dcSlot];
653 if (pDC->isCompute || pDC->doneFE)
654 {
655 CompleteDrawContextInl(pContext, workerId, pDC);
656 curDrawFE++;
657 }
658 else
659 {
660 break;
661 }
662 }
663
664 uint32_t lastRetiredFE = curDrawFE - 1;
665 uint32_t curDraw = curDrawFE;
666 while (IDComparesLess(curDraw, drawEnqueued))
667 {
668 uint32_t dcSlot = curDraw % KNOB_MAX_DRAWS_IN_FLIGHT;
669 DRAW_CONTEXT *pDC = &pContext->dcRing[dcSlot];
670
671 if (!pDC->isCompute && !pDC->FeLock)
672 {
673 if (CheckDependencyFE(pContext, pDC, lastRetiredFE))
674 {
675 return;
676 }
677
678 uint32_t initial = InterlockedCompareExchange((volatile uint32_t*)&pDC->FeLock, 1, 0);
679 if (initial == 0)
680 {
681 // successfully grabbed the DC, now run the FE
682 pDC->FeWork.pfnWork(pContext, pDC, workerId, &pDC->FeWork.desc);
683
684 CompleteDrawFE(pContext, workerId, pDC);
685 }
686 }
687 curDraw++;
688 }
689 }
690
691 //////////////////////////////////////////////////////////////////////////
692 /// @brief If there is any compute work then go work on it.
693 /// @param pContext - pointer to SWR context.
694 /// @param workerId - The unique worker ID that is assigned to this thread.
695 /// @param curDrawBE - This tracks the draw contexts that this thread has processed. Each worker thread
696 /// has its own curDrawBE counter and this ensures that each worker processes all the
697 /// draws in order.
698 void WorkOnCompute(
699 SWR_CONTEXT *pContext,
700 uint32_t workerId,
701 uint32_t& curDrawBE)
702 {
703 uint32_t drawEnqueued = 0;
704 if (FindFirstIncompleteDraw(pContext, workerId, curDrawBE, drawEnqueued) == false)
705 {
706 return;
707 }
708
709 uint32_t lastRetiredDraw = pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT].drawId - 1;
710
711 for (uint64_t i = curDrawBE; IDComparesLess(i, drawEnqueued); ++i)
712 {
713 DRAW_CONTEXT *pDC = &pContext->dcRing[i % KNOB_MAX_DRAWS_IN_FLIGHT];
714 if (pDC->isCompute == false) return;
715
716 // check dependencies
717 if (CheckDependency(pContext, pDC, lastRetiredDraw))
718 {
719 return;
720 }
721
722 SWR_ASSERT(pDC->pDispatch != nullptr);
723 DispatchQueue& queue = *pDC->pDispatch;
724
725 // Is there any work remaining?
726 if (queue.getNumQueued() > 0)
727 {
728 void* pSpillFillBuffer = nullptr;
729 void* pScratchSpace = nullptr;
730 uint32_t threadGroupId = 0;
731 while (queue.getWork(threadGroupId))
732 {
733 queue.dispatch(pDC, workerId, threadGroupId, pSpillFillBuffer, pScratchSpace);
734 queue.finishedWork();
735 }
736
737 // Ensure all streaming writes are globally visible before moving onto the next draw
738 _mm_mfence();
739 }
740 }
741 }
742
743 template<bool IsFEThread, bool IsBEThread>
744 DWORD workerThreadMain(LPVOID pData)
745 {
746 THREAD_DATA *pThreadData = (THREAD_DATA*)pData;
747 SWR_CONTEXT *pContext = pThreadData->pContext;
748 uint32_t threadId = pThreadData->threadId;
749 uint32_t workerId = pThreadData->workerId;
750
751 bindThread(pContext, threadId, pThreadData->procGroupId, pThreadData->forceBindProcGroup);
752
753 {
754 char threadName[64];
755 sprintf_s(threadName,
756 #if defined(_WIN32)
757 "SWRWorker_%02d_NUMA%d_Core%02d_T%d",
758 #else
759 // Linux pthread names are limited to 16 chars (including \0)
760 "w%03d-n%d-c%03d-t%d",
761 #endif
762 workerId, pThreadData->numaId, pThreadData->coreId, pThreadData->htId);
763 SetCurrentThreadName(threadName);
764 }
765
766 RDTSC_INIT(threadId);
767
768 uint32_t numaNode = pThreadData->numaId;
769 uint32_t numaMask = pContext->threadPool.numaMask;
770
771 // flush denormals to 0
772 _mm_setcsr(_mm_getcsr() | _MM_FLUSH_ZERO_ON | _MM_DENORMALS_ZERO_ON);
773
774 // Track tiles locked by other threads. If we try to lock a macrotile and find it's already
775 // locked, we'll add it to this list so that we don't try to lock it again.
776 TileSet lockedTiles;
777
778 // Each worker can work on any of the queued draws as long as certain
779 // conditions are met. The data associated
780 // with a draw is guaranteed to stay live as long as a worker hasn't signaled that it
781 // has moved on to the next draw after determining there is no more work to do. The API
782 // thread will not increment the head of the DC ring until all workers have moved past the
783 // current head.
784 // The logic to determine what to work on is:
785 // 1- Try to work on the FE of any draw that is queued. For now there are no dependencies
786 // on the FE work, so any worker can grab any FE and process it in parallel. Eventually
787 // we'll need dependency tracking to force serialization of FEs. The worker tries
788 // to pick an FE by atomically incrementing a counter in the SWR context, and keeps
789 // trying until it reaches the tail.
790 // 2- BE work must be done in strict order. We accomplish this today by pulling work off
791 // the oldest draw (i.e. the head) of the dcRing. The worker can determine if there is
792 // any work left by comparing the total # of binned work items and the total # of completed
793 // work items. If they are equal, then there is no more work to do for this draw, and
794 // the worker can safely increment its oldestDraw counter and move on to the next draw.
795 std::unique_lock<std::mutex> lock(pContext->WaitLock, std::defer_lock);
796
797 auto threadHasWork = [&](uint32_t curDraw) { return curDraw != pContext->dcRing.GetHead(); };
798
799 uint32_t curDrawBE = 0;
800 uint32_t curDrawFE = 0;
801
802 bool bShutdown = false;
803
804 while (true)
805 {
806 if (bShutdown && !threadHasWork(curDrawBE))
807 {
808 break;
809 }
810
811 uint32_t loop = 0;
812 while (loop++ < KNOB_WORKER_SPIN_LOOP_COUNT && !threadHasWork(curDrawBE))
813 {
814 _mm_pause();
815 }
816
817 if (!threadHasWork(curDrawBE))
818 {
819 lock.lock();
820
821 // check for thread idle condition again under lock
822 if (threadHasWork(curDrawBE))
823 {
824 lock.unlock();
825 continue;
826 }
827
828 pContext->FifosNotEmpty.wait(lock);
829 lock.unlock();
830 }
831
832 if (IsBEThread)
833 {
834 AR_BEGIN(WorkerWorkOnFifoBE, 0);
835 bShutdown |= WorkOnFifoBE(pContext, workerId, curDrawBE, lockedTiles, numaNode, numaMask);
836 AR_END(WorkerWorkOnFifoBE, 0);
837
838 WorkOnCompute(pContext, workerId, curDrawBE);
839 }
840
841 if (IsFEThread)
842 {
843 WorkOnFifoFE(pContext, workerId, curDrawFE);
844
845 if (!IsBEThread)
846 {
847 curDrawBE = curDrawFE;
848 }
849 }
850 }
851
852 return 0;
853 }
854 template<> DWORD workerThreadMain<false, false>(LPVOID) = delete;
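// A worker must service the FE, the BE, or both; the <false, false> specialization is deleted
// so a thread that would do neither cannot be instantiated.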
855
856 template <bool IsFEThread, bool IsBEThread>
857 DWORD workerThreadInit(LPVOID pData)
858 {
859 #if defined(_WIN32)
860 __try
861 #endif // _WIN32
862 {
863 return workerThreadMain<IsFEThread, IsBEThread>(pData);
864 }
865
866 #if defined(_WIN32)
867 __except(EXCEPTION_CONTINUE_SEARCH)
868 {
869 }
870
871 #endif // _WIN32
872
873 return 1;
874 }
875 template<> DWORD workerThreadInit<false, false>(LPVOID pData) = delete;
876
877 //////////////////////////////////////////////////////////////////////////
878 /// @brief Creates thread pool info but doesn't launch threads.
879 /// @param pContext - pointer to context
880 /// @param pPool - pointer to thread pool object.
881 void CreateThreadPool(SWR_CONTEXT* pContext, THREAD_POOL* pPool)
882 {
883 bindThread(pContext, 0);
884
885 CPUNumaNodes nodes;
886 uint32_t numThreadsPerProcGroup = 0;
887 CalculateProcessorTopology(nodes, numThreadsPerProcGroup);
888
889 uint32_t numHWNodes = (uint32_t)nodes.size();
890 uint32_t numHWCoresPerNode = (uint32_t)nodes[0].cores.size();
891 uint32_t numHWHyperThreads = (uint32_t)nodes[0].cores[0].threadIds.size();
892
893 // Calculate num HW threads. Due to asymmetric topologies, this is not
894 // a trivial multiplication.
895 uint32_t numHWThreads = 0;
896 for (auto& node : nodes)
897 {
898 for (auto& core : node.cores)
899 {
900 numHWThreads += (uint32_t)core.threadIds.size();
901 }
902 }
903
904 uint32_t numNodes = numHWNodes;
905 uint32_t numCoresPerNode = numHWCoresPerNode;
906 uint32_t numHyperThreads = numHWHyperThreads;
907
908 if (pContext->threadInfo.MAX_NUMA_NODES)
909 {
910 numNodes = std::min(numNodes, pContext->threadInfo.MAX_NUMA_NODES);
911 }
912
913 if (pContext->threadInfo.MAX_CORES_PER_NUMA_NODE)
914 {
915 numCoresPerNode = std::min(numCoresPerNode, pContext->threadInfo.MAX_CORES_PER_NUMA_NODE);
916 }
917
918 if (pContext->threadInfo.MAX_THREADS_PER_CORE)
919 {
920 numHyperThreads = std::min(numHyperThreads, pContext->threadInfo.MAX_THREADS_PER_CORE);
921 }
922
923 #if defined(_WIN32) && !defined(_WIN64)
924 if (!pContext->threadInfo.MAX_WORKER_THREADS)
925 {
926 // Limit 32-bit Windows to bindable HW threads only
927 if ((numCoresPerNode * numHWHyperThreads) > 32)
928 {
929 numCoresPerNode = 32 / numHWHyperThreads;
930 }
931 }
932 #endif
933
934 // Calculate numThreads
935 uint32_t numThreads = numNodes * numCoresPerNode * numHyperThreads;
936 numThreads = std::min(numThreads, numHWThreads);
937
938 if (pContext->threadInfo.MAX_WORKER_THREADS)
939 {
940 uint32_t maxHWThreads = numHWNodes * numHWCoresPerNode * numHWHyperThreads;
941 numThreads = std::min(pContext->threadInfo.MAX_WORKER_THREADS, maxHWThreads);
942 }
943
944 uint32_t numAPIReservedThreads = 1;
945
946
947 if (numThreads == 1)
948 {
949 // If only 1 worker thread, try to move it to an available
950 // HW thread. If that fails, use the API thread.
951 if (numCoresPerNode < numHWCoresPerNode)
952 {
953 numCoresPerNode++;
954 }
955 else if (numHyperThreads < numHWHyperThreads)
956 {
957 numHyperThreads++;
958 }
959 else if (numNodes < numHWNodes)
960 {
961 numNodes++;
962 }
963 else
964 {
965 pContext->threadInfo.SINGLE_THREADED = true;
966 }
967 }
968 else
969 {
970 // Save HW threads for the API if we can
971 if (numThreads > numAPIReservedThreads)
972 {
973 numThreads -= numAPIReservedThreads;
974 }
975 else
976 {
977 numAPIReservedThreads = 0;
978 }
979 }
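// Illustrative sizing (not from the source): with 2 nodes x 8 cores x 2 HT and no knob overrides,
// numThreads = 32 HW threads; one thread is then reserved for the API thread above, leaving 31 workers.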
980
981 if (pContext->threadInfo.SINGLE_THREADED)
982 {
983 numThreads = 1;
984 }
985
986 // Initialize DRAW_CONTEXT's per-thread stats
987 for (uint32_t dc = 0; dc < KNOB_MAX_DRAWS_IN_FLIGHT; ++dc)
988 {
989 pContext->dcRing[dc].dynState.pStats = new SWR_STATS[numThreads];
990 memset(pContext->dcRing[dc].dynState.pStats, 0, sizeof(SWR_STATS) * numThreads);
991 }
992
993 if (pContext->threadInfo.SINGLE_THREADED)
994 {
995 pContext->NumWorkerThreads = 1;
996 pContext->NumFEThreads = 1;
997 pContext->NumBEThreads = 1;
998 pPool->numThreads = 0;
999
1000 return;
1001 }
1002
1003 pPool->numThreads = numThreads;
1004 pContext->NumWorkerThreads = pPool->numThreads;
1005
1006 pPool->pThreadData = (THREAD_DATA *)malloc(pPool->numThreads * sizeof(THREAD_DATA));
1007 pPool->numaMask = 0;
1008
1009 pPool->pThreads = new THREAD_PTR[pPool->numThreads];
1010
1011 if (pContext->threadInfo.MAX_WORKER_THREADS)
1012 {
1013 bool bForceBindProcGroup = (numThreads > numThreadsPerProcGroup);
1014 uint32_t numProcGroups = (numThreads + numThreadsPerProcGroup - 1) / numThreadsPerProcGroup;
1015 // When MAX_WORKER_THREADS is set we don't bother to bind to specific HW threads,
1016 // but Windows still requires binding to specific processor groups.
1017 for (uint32_t workerId = 0; workerId < numThreads; ++workerId)
1018 {
1019 pPool->pThreadData[workerId].workerId = workerId;
1020 pPool->pThreadData[workerId].procGroupId = workerId % numProcGroups;
1021 pPool->pThreadData[workerId].threadId = 0;
1022 pPool->pThreadData[workerId].numaId = 0;
1023 pPool->pThreadData[workerId].coreId = 0;
1024 pPool->pThreadData[workerId].htId = 0;
1025 pPool->pThreadData[workerId].pContext = pContext;
1026 pPool->pThreadData[workerId].forceBindProcGroup = bForceBindProcGroup;
1027
1028 pContext->NumBEThreads++;
1029 pContext->NumFEThreads++;
1030 }
1031 }
1032 else
1033 {
1034 pPool->numaMask = numNodes - 1; // Only works for 2**n numa nodes (1, 2, 4, etc.)
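// e.g. (illustrative) numNodes == 4 gives numaMask == 0x3; WorkOnFifoBE then only processes
// macrotile (x, y) on this worker's node when ((x ^ y) & numaMask) == numaNode.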
1035
1036 uint32_t workerId = 0;
1037 for (uint32_t n = 0; n < numNodes; ++n)
1038 {
1039 auto& node = nodes[n];
1040 uint32_t numCores = numCoresPerNode;
1041 for (uint32_t c = 0; c < numCores; ++c)
1042 {
1043 if (c >= node.cores.size())
1044 {
1045 break;
1046 }
1047
1048 auto& core = node.cores[c];
1049 for (uint32_t t = 0; t < numHyperThreads; ++t)
1050 {
1051 if (t >= core.threadIds.size())
1052 {
1053 break;
1054 }
1055
1056 if (numAPIReservedThreads)
1057 {
1058 --numAPIReservedThreads;
1059 continue;
1060 }
1061
1062 SWR_ASSERT(workerId < numThreads);
1063
1064 pPool->pThreadData[workerId].workerId = workerId;
1065 pPool->pThreadData[workerId].procGroupId = core.procGroup;
1066 pPool->pThreadData[workerId].threadId = core.threadIds[t];
1067 pPool->pThreadData[workerId].numaId = node.numaId;
1068 pPool->pThreadData[workerId].coreId = c;
1069 pPool->pThreadData[workerId].htId = t;
1070 pPool->pThreadData[workerId].pContext = pContext;
1071
1072 pContext->NumBEThreads++;
1073 pContext->NumFEThreads++;
1074
1075 ++workerId;
1076 }
1077 }
1078 }
1079 SWR_ASSERT(workerId == pContext->NumWorkerThreads);
1080 }
1081 }
1082
1083 //////////////////////////////////////////////////////////////////////////
1084 /// @brief Launches worker threads in thread pool.
1085 /// @param pContext - pointer to context
1086 /// @param pPool - pointer to thread pool object.
1087 void StartThreadPool(SWR_CONTEXT* pContext, THREAD_POOL* pPool)
1088 {
1089 if (pContext->threadInfo.SINGLE_THREADED)
1090 {
1091 return;
1092 }
1093
1094 for (uint32_t workerId = 0; workerId < pContext->NumWorkerThreads; ++workerId)
1095 {
1096 pPool->pThreads[workerId] = new std::thread(workerThreadInit<true, true>, &pPool->pThreadData[workerId]);
1097 }
1098 }
1099
1100 //////////////////////////////////////////////////////////////////////////
1101 /// @brief Destroys thread pool.
1102 /// @param pContext - pointer to context
1103 /// @param pPool - pointer to thread pool object.
1104 void DestroyThreadPool(SWR_CONTEXT *pContext, THREAD_POOL *pPool)
1105 {
1106 if (!pContext->threadInfo.SINGLE_THREADED)
1107 {
1108 // Wait for all threads to finish
1109 SwrWaitForIdle(pContext);
1110
1111 // Detach and destroy the worker threads
1112 for (uint32_t t = 0; t < pPool->numThreads; ++t)
1113 {
1114 // Detach from the thread. Cannot join() due to the possibility (on Windows) of code
1115 // in some DllMain (DLL_THREAD_DETACH case) blocking the thread until after this returns.
1116 pPool->pThreads[t]->detach();
1117 delete(pPool->pThreads[t]);
1118 }
1119
1120 delete [] pPool->pThreads;
1121
1122 // Clean up data used by threads
1123 free(pPool->pThreadData);
1124 }
1125 }