swr: Fix KNOB_MAX_WORKER_THREADS thread creation override.
[mesa.git] / src / gallium / drivers / swr / rasterizer / core / threads.cpp
1 /****************************************************************************
2 * Copyright (C) 2014-2016 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 ****************************************************************************/
23
24 #include <stdio.h>
25 #include <thread>
26 #include <algorithm>
27 #include <float.h>
28 #include <vector>
29 #include <utility>
30 #include <fstream>
31 #include <string>
#include <cstring>  // strerror() used in bindThread(); may already be included transitively
32
33 #if defined(__linux__) || defined(__gnu_linux__) || defined(__APPLE__)
34 #include <pthread.h>
35 #include <sched.h>
36 #include <unistd.h>
37 #endif
38
39 #include "common/os.h"
40 #include "context.h"
41 #include "frontend.h"
42 #include "backend.h"
43 #include "rasterizer.h"
44 #include "rdtsc_core.h"
45 #include "tilemgr.h"
46
47
48
49
50 // Processor topology: a Core holds its processor group and the HW thread (logical CPU) ids it contains
51 struct Core
52 {
53 uint32_t procGroup = 0;
54 std::vector<uint32_t> threadIds;
55 };
56
57 struct NumaNode
58 {
59 uint32_t numaId;
60 std::vector<Core> cores;
61 };
62
63 typedef std::vector<NumaNode> CPUNumaNodes;
64
65 void CalculateProcessorTopology(CPUNumaNodes& out_nodes, uint32_t& out_numThreadsPerProcGroup)
66 {
67 out_nodes.clear();
68 out_numThreadsPerProcGroup = 0;
69
70 #if defined(_WIN32)
71
72 std::vector<KAFFINITY> threadMaskPerProcGroup;
73
74 static std::mutex m;
75 std::lock_guard<std::mutex> l(m);
76
77 DWORD bufSize = 0;
78
79 BOOL ret = GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &bufSize);
80 SWR_ASSERT(ret == FALSE && GetLastError() == ERROR_INSUFFICIENT_BUFFER);
81
82 PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBufferMem = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)malloc(bufSize);
83 SWR_ASSERT(pBufferMem);
84
85 ret = GetLogicalProcessorInformationEx(RelationProcessorCore, pBufferMem, &bufSize);
86 SWR_ASSERT(ret != FALSE, "Failed to get Processor Topology Information");
87
88 uint32_t count = bufSize / pBufferMem->Size;
89 PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBuffer = pBufferMem;
90
91 for (uint32_t i = 0; i < count; ++i)
92 {
93 SWR_ASSERT(pBuffer->Relationship == RelationProcessorCore);
94 for (uint32_t g = 0; g < pBuffer->Processor.GroupCount; ++g)
95 {
96 auto& gmask = pBuffer->Processor.GroupMask[g];
97 uint32_t threadId = 0;
98 uint32_t procGroup = gmask.Group;
99
100 Core* pCore = nullptr;
101
102 uint32_t numThreads = (uint32_t)_mm_popcount_sizeT(gmask.Mask);
103
104 while (BitScanForwardSizeT((unsigned long*)&threadId, gmask.Mask))
105 {
106 // clear mask
107 KAFFINITY threadMask = KAFFINITY(1) << threadId;
108 gmask.Mask &= ~threadMask;
109
110 if (procGroup >= threadMaskPerProcGroup.size())
111 {
112 threadMaskPerProcGroup.resize(procGroup + 1);
113 }
114
115 if (threadMaskPerProcGroup[procGroup] & threadMask)
116 {
117 // Already seen this mask. This means that we are in 32-bit mode and
118 // have seen more than 32 HW threads for this procGroup
119 // Don't use it
120 #if defined(_WIN64)
121 SWR_INVALID("Shouldn't get here in 64-bit mode");
122 #endif
123 continue;
124 }
125
126 threadMaskPerProcGroup[procGroup] |= (KAFFINITY(1) << threadId);
127
128 // Find Numa Node
129 uint32_t numaId = 0;
130 PROCESSOR_NUMBER procNum = {};
131 procNum.Group = WORD(procGroup);
132 procNum.Number = UCHAR(threadId);
133
134 ret = GetNumaProcessorNodeEx(&procNum, (PUSHORT)&numaId);
135 SWR_ASSERT(ret);
136
137 // Store data
138 if (out_nodes.size() <= numaId)
139 {
140 out_nodes.resize(numaId + 1);
141 }
142 auto& numaNode = out_nodes[numaId];
143 numaNode.numaId = numaId;
144
145 uint32_t coreId = 0;
146
147 if (nullptr == pCore)
148 {
149 numaNode.cores.push_back(Core());
150 pCore = &numaNode.cores.back();
151 pCore->procGroup = procGroup;
152 }
153 pCore->threadIds.push_back(threadId);
154 if (procGroup == 0)
155 {
156 out_numThreadsPerProcGroup++;
157 }
158 }
159 }
160 pBuffer = PtrAdd(pBuffer, pBuffer->Size);
161 }
162
163 free(pBufferMem);
164
165
166 #elif defined(__linux__) || defined (__gnu_linux__)
167
168 // Parse /proc/cpuinfo to get full topology
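// Each logical-CPU stanza in /proc/cpuinfo is terminated by a blank line and contains,
// among other fields:
//   processor   : 7    <- logical CPU id (stored as a Core::threadIds entry)
//   physical id : 0    <- package id, used here as the NUMA node id
//   core id     : 3    <- core index within the package
// The blank-line case below records the ids parsed from the current stanza.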
169 std::ifstream input("/proc/cpuinfo");
170 std::string line;
171 char* c;
172 uint32_t procId = uint32_t(-1);
173 uint32_t coreId = uint32_t(-1);
174 uint32_t physId = uint32_t(-1);
175
176 while (std::getline(input, line))
177 {
178 if (line.find("processor") != std::string::npos)
179 {
180 auto data_start = line.find(": ") + 2;
181 procId = std::strtoul(&line.c_str()[data_start], &c, 10);
182 continue;
183 }
184 if (line.find("core id") != std::string::npos)
185 {
186 auto data_start = line.find(": ") + 2;
187 coreId = std::strtoul(&line.c_str()[data_start], &c, 10);
188 continue;
189 }
190 if (line.find("physical id") != std::string::npos)
191 {
192 auto data_start = line.find(": ") + 2;
193 physId = std::strtoul(&line.c_str()[data_start], &c, 10);
194 continue;
195 }
196 if (line.length() == 0)
197 {
198 if (physId + 1 > out_nodes.size())
199 out_nodes.resize(physId + 1);
200 auto& numaNode = out_nodes[physId];
201 numaNode.numaId = physId;
202
203 if (coreId + 1 > numaNode.cores.size())
204 numaNode.cores.resize(coreId + 1);
205 auto& core = numaNode.cores[coreId];
206 core.procGroup = coreId;
207 core.threadIds.push_back(procId);
208 }
209 }
210
211 out_numThreadsPerProcGroup = 0;
212 for (auto &node : out_nodes)
213 {
214 for (auto &core : node.cores)
215 {
216 out_numThreadsPerProcGroup += core.threadIds.size();
217 }
218 }
219
220 #elif defined(__APPLE__)
221
222 #else
223
224 #error Unsupported platform
225
226 #endif
227
228 // Prune empty cores and numa nodes
229 for (auto node_it = out_nodes.begin(); node_it != out_nodes.end(); )
230 {
231 // Erase empty cores (first)
232 for (auto core_it = node_it->cores.begin(); core_it != node_it->cores.end(); )
233 {
234 if (core_it->threadIds.size() == 0)
235 {
236 core_it = node_it->cores.erase(core_it);
237 }
238 else
239 {
240 ++core_it;
241 }
242 }
243
244 // Erase empty numa nodes (second)
245 if (node_it->cores.size() == 0)
246 {
247 node_it = out_nodes.erase(node_it);
248 }
249 else
250 {
251 ++node_it;
252 }
253 }
254 }
255
256
257 void bindThread(SWR_CONTEXT* pContext, uint32_t threadId, uint32_t procGroupId = 0, bool bindProcGroup=false)
258 {
259 // Don't bind in single-threaded mode; when MAX_WORKER_THREADS is set, bind only at the processor-group level.
260 if (pContext->threadInfo.SINGLE_THREADED || (pContext->threadInfo.MAX_WORKER_THREADS && bindProcGroup == false))
261 {
262 return;
263 }
264
265 #if defined(_WIN32)
266
267 GROUP_AFFINITY affinity = {};
268 affinity.Group = procGroupId;
269
270 #if !defined(_WIN64)
271 if (threadId >= 32)
272 {
273 // Hopefully we don't get here. Logic in CreateThreadPool should prevent this.
274 SWR_INVALID("Shouldn't get here");
275
276 // In a 32-bit process on Windows it is impossible to bind
277 // to logical processors 32-63 within a processor group.
278 // In this case set the mask to 0 and let the system assign
279 // the processor. Hopefully it will make smart choices.
280 affinity.Mask = 0;
281 }
282 else
283 #endif
284 {
285 // If MAX_WORKER_THREADS is set, only bind to the proc group,
286 // Not the individual HW thread.
287 if (!pContext->threadInfo.MAX_WORKER_THREADS)
288 {
289 affinity.Mask = KAFFINITY(1) << threadId;
290 }
291 }
292
293 SetThreadGroupAffinity(GetCurrentThread(), &affinity, nullptr);
294
295 #elif defined(__linux__) || defined(__gnu_linux__)
296
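// threadId on Linux is the logical CPU index (the "processor" value parsed from
// /proc/cpuinfo); pin the calling thread to exactly that CPU.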
297 cpu_set_t cpuset;
298 pthread_t thread = pthread_self();
299 CPU_ZERO(&cpuset);
300 CPU_SET(threadId, &cpuset);
301
302 int err = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
303 if (err != 0)
304 {
305 fprintf(stderr, "pthread_setaffinity_np failure for tid %u: %s\n", threadId, strerror(err));
306 }
307
308 #endif
309 }
310
311 INLINE
312 uint32_t GetEnqueuedDraw(SWR_CONTEXT *pContext)
313 {
314 return pContext->dcRing.GetHead();
315 }
316
317 INLINE
318 DRAW_CONTEXT *GetDC(SWR_CONTEXT *pContext, uint32_t drawId)
319 {
320 return &pContext->dcRing[(drawId-1) % pContext->MAX_DRAWS_IN_FLIGHT];
321 }
322
323 INLINE
324 bool IDComparesLess(uint32_t a, uint32_t b)
325 {
326 // Use signed delta to ensure that wrap-around to 0 is correctly handled.
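// Example: just after drawId wraps, IDComparesLess(0xFFFFFFF0, 0x00000002) computes
// int32_t(0xFFFFFFEE) == -18 < 0, so the pre-wrap id still compares as the older draw.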
327 int32_t delta = int32_t(a - b);
328 return (delta < 0);
329 }
330
331 // returns true if dependency not met
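// i.e. pDC depends on the draw immediately before it (drawId - 1); the dependency is
// satisfied once lastRetiredDraw has caught up to that draw.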
332 INLINE
333 bool CheckDependency(SWR_CONTEXT *pContext, DRAW_CONTEXT *pDC, uint32_t lastRetiredDraw)
334 {
335 return pDC->dependent && IDComparesLess(lastRetiredDraw, pDC->drawId - 1);
336 }
337
338 bool CheckDependencyFE(SWR_CONTEXT *pContext, DRAW_CONTEXT *pDC, uint32_t lastRetiredDraw)
339 {
340 return pDC->dependentFE && IDComparesLess(lastRetiredDraw, pDC->drawId - 1);
341 }
342
343 //////////////////////////////////////////////////////////////////////////
344 /// @brief Update client stats.
345 INLINE void UpdateClientStats(SWR_CONTEXT* pContext, uint32_t workerId, DRAW_CONTEXT* pDC)
346 {
347 if ((pContext->pfnUpdateStats == nullptr) || (GetApiState(pDC).enableStatsBE == false))
348 {
349 return;
350 }
351
352 DRAW_DYNAMIC_STATE& dynState = pDC->dynState;
353 OSALIGNLINE(SWR_STATS) stats{ 0 };
354
355 // Sum up stats across all workers before sending to client.
356 for (uint32_t i = 0; i < pContext->NumWorkerThreads; ++i)
357 {
358 stats.DepthPassCount += dynState.pStats[i].DepthPassCount;
359
360 stats.PsInvocations += dynState.pStats[i].PsInvocations;
361 stats.CsInvocations += dynState.pStats[i].CsInvocations;
362 }
363
364
365 pContext->pfnUpdateStats(GetPrivateState(pDC), &stats);
366 }
367
368 INLINE void ExecuteCallbacks(SWR_CONTEXT* pContext, uint32_t workerId, DRAW_CONTEXT* pDC)
369 {
370 UpdateClientStats(pContext, workerId, pDC);
371
372 if (pDC->retireCallback.pfnCallbackFunc)
373 {
374 pDC->retireCallback.pfnCallbackFunc(pDC->retireCallback.userData,
375 pDC->retireCallback.userData2,
376 pDC->retireCallback.userData3);
377 }
378 }
379
380 // inlined-only version
381 INLINE int32_t CompleteDrawContextInl(SWR_CONTEXT* pContext, uint32_t workerId, DRAW_CONTEXT* pDC)
382 {
383 int32_t result = static_cast<int32_t>(InterlockedDecrement(&pDC->threadsDone));
384 SWR_ASSERT(result >= 0);
385
386 AR_FLUSH(pDC->drawId);
387
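// Only the last worker to finish with this draw context (threadsDone reaches zero) retires it.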
388 if (result == 0)
389 {
390 ExecuteCallbacks(pContext, workerId, pDC);
391
392 // Cleanup memory allocations
393 pDC->pArena->Reset(true);
394 if (!pDC->isCompute)
395 {
396 pDC->pTileMgr->initialize();
397 }
398 if (pDC->cleanupState)
399 {
400 pDC->pState->pArena->Reset(true);
401 }
402
403 _ReadWriteBarrier();
404
405 pContext->dcRing.Dequeue(); // Remove from tail
406 }
407
408 return result;
409 }
410
411 // available to other translation modules
412 int32_t CompleteDrawContext(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
413 {
414 return CompleteDrawContextInl(pContext, 0, pDC);
415 }
416
417 INLINE bool FindFirstIncompleteDraw(SWR_CONTEXT* pContext, uint32_t workerId, uint32_t& curDrawBE, uint32_t& drawEnqueued)
418 {
419 // increment our current draw id to the first incomplete draw
420 drawEnqueued = GetEnqueuedDraw(pContext);
421 while (IDComparesLess(curDrawBE, drawEnqueued))
422 {
423 DRAW_CONTEXT *pDC = &pContext->dcRing[curDrawBE % pContext->MAX_DRAWS_IN_FLIGHT];
424
425 // If it's not compute and FE is not done, then break out of the loop.
426 if (!pDC->doneFE && !pDC->isCompute) break;
427
428 bool isWorkComplete = pDC->isCompute ?
429 pDC->pDispatch->isWorkComplete() :
430 pDC->pTileMgr->isWorkComplete();
431
432 if (isWorkComplete)
433 {
434 curDrawBE++;
435 CompleteDrawContextInl(pContext, workerId, pDC);
436 }
437 else
438 {
439 break;
440 }
441 }
442
443 // If there are no more incomplete draws then return false.
444 return IDComparesLess(curDrawBE, drawEnqueued);
445 }
446
447 //////////////////////////////////////////////////////////////////////////
448 /// @brief If there is any BE work then go work on it.
449 /// @param pContext - pointer to SWR context.
450 /// @param workerId - The unique worker ID that is assigned to this thread.
451 /// @param curDrawBE - This tracks the draw contexts that this thread has processed. Each worker thread
452 /// has its own curDrawBE counter and this ensures that each worker processes all the
453 /// draws in order.
454 /// @param lockedTiles - This is the set of tiles locked by other threads. Each thread maintains its
455 /// own set, and each time it fails to lock a macrotile because it's already locked,
456 /// it adds that tile to the lockedTiles set. As a worker begins to work
457 /// on future draws, the lockedTiles set ensures that it doesn't work on tiles that may
458 /// still have work pending in a previous draw. Additionally, lockedTiles is a
459 /// heuristic that can steer a worker back to the same macrotile that it had been
460 /// working on in a previous draw.
461 /// @returns true if worker thread should shutdown
462 bool WorkOnFifoBE(
463 SWR_CONTEXT *pContext,
464 uint32_t workerId,
465 uint32_t &curDrawBE,
466 TileSet& lockedTiles,
467 uint32_t numaNode,
468 uint32_t numaMask)
469 {
470 bool bShutdown = false;
471
472 // Find the first incomplete draw that has pending work. If no such draw is found then
473 // return. FindFirstIncompleteDraw is responsible for incrementing the curDrawBE.
474 uint32_t drawEnqueued = 0;
475 if (FindFirstIncompleteDraw(pContext, workerId, curDrawBE, drawEnqueued) == false)
476 {
477 return false;
478 }
479
480 uint32_t lastRetiredDraw = pContext->dcRing[curDrawBE % pContext->MAX_DRAWS_IN_FLIGHT].drawId - 1;
481
482 // Reset our history for locked tiles. We'll have to re-learn which tiles are locked.
483 lockedTiles.clear();
484
485 // Try to work on each draw in order of the available draws in flight.
486 // 1. If we're on curDrawBE, we can work on any macrotile that is available.
487 // 2. If we're trying to work on draws after curDrawBE, we are restricted to
488 // working on those macrotiles that are known to be complete in the prior draw to
489 // maintain order. The locked-tiles set provides the history that ensures this.
490 for (uint32_t i = curDrawBE; IDComparesLess(i, drawEnqueued); ++i)
491 {
492 DRAW_CONTEXT *pDC = &pContext->dcRing[i % pContext->MAX_DRAWS_IN_FLIGHT];
493
494 if (pDC->isCompute) return false; // We don't look at compute work.
495
496 // First wait for FE to be finished with this draw. This keeps threading model simple
497 // but if there are lots of bubbles between draws then serializing FE and BE may
498 // need to be revisited.
499 if (!pDC->doneFE) return false;
500
501 // If this draw is dependent on a previous draw then we need to bail.
502 if (CheckDependency(pContext, pDC, lastRetiredDraw))
503 {
504 return false;
505 }
506
507 // Grab the list of all dirty macrotiles. A tile is dirty if it has work queued to it.
508 auto &macroTiles = pDC->pTileMgr->getDirtyTiles();
509
510 for (auto tile : macroTiles)
511 {
512 uint32_t tileID = tile->mId;
513
514 // Only work on tiles for this numa node
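// Macrotiles are statically partitioned across NUMA nodes by a checkerboard of their
// tile coordinates; with numaMask == 0 every worker sees every tile.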
515 uint32_t x, y;
516 pDC->pTileMgr->getTileIndices(tileID, x, y);
517 if (((x ^ y) & numaMask) != numaNode)
518 {
519 continue;
520 }
521
522 if (!tile->getNumQueued())
523 {
524 continue;
525 }
526
527 // can only work on this tile if it's not already locked by another thread
528 if (lockedTiles.find(tileID) != lockedTiles.end())
529 {
530 continue;
531 }
532
533 if (tile->tryLock())
534 {
535 BE_WORK *pWork;
536
537 AR_BEGIN(WorkerFoundWork, pDC->drawId);
538
539 uint32_t numWorkItems = tile->getNumQueued();
540 SWR_ASSERT(numWorkItems);
541
542 pWork = tile->peek();
543 SWR_ASSERT(pWork);
544 if (pWork->type == DRAW)
545 {
546 pContext->pHotTileMgr->InitializeHotTiles(pContext, pDC, workerId, tileID);
547 }
548 else if (pWork->type == SHUTDOWN)
549 {
550 bShutdown = true;
551 }
552
553 while ((pWork = tile->peek()) != nullptr)
554 {
555 pWork->pfnWork(pDC, workerId, tileID, &pWork->desc);
556 tile->dequeue();
557 }
558 AR_END(WorkerFoundWork, numWorkItems);
559
560 _ReadWriteBarrier();
561
562 pDC->pTileMgr->markTileComplete(tileID);
563
564 // Optimization: If the draw is complete and we're the last one to have worked on it then
565 // we can reset the locked-tiles list, since every draw up to and including this one is guaranteed to be complete.
566 if ((curDrawBE == i) && (bShutdown || pDC->pTileMgr->isWorkComplete()))
567 {
568 // We can increment the current BE and safely move to next draw since we know this draw is complete.
569 curDrawBE++;
570 CompleteDrawContextInl(pContext, workerId, pDC);
571
572 lastRetiredDraw++;
573
574 lockedTiles.clear();
575 break;
576 }
577
578 if (bShutdown)
579 {
580 break;
581 }
582 }
583 else
584 {
585 // This tile is already locked. So let's add it to our locked tiles set. This way we don't try locking this one again.
586 lockedTiles.insert(tileID);
587 }
588 }
589 }
590
591 return bShutdown;
592 }
593
594 //////////////////////////////////////////////////////////////////////////
595 /// @brief Called when FE work is complete for this DC.
596 INLINE void CompleteDrawFE(SWR_CONTEXT* pContext, uint32_t workerId, DRAW_CONTEXT* pDC)
597 {
598 if (pContext->pfnUpdateStatsFE && GetApiState(pDC).enableStatsFE)
599 {
600 SWR_STATS_FE& stats = pDC->dynState.statsFE;
601
602 AR_EVENT(FrontendStatsEvent(pDC->drawId,
603 stats.IaVertices, stats.IaPrimitives, stats.VsInvocations, stats.HsInvocations,
604 stats.DsInvocations, stats.GsInvocations, stats.GsPrimitives, stats.CInvocations, stats.CPrimitives,
605 stats.SoPrimStorageNeeded[0], stats.SoPrimStorageNeeded[1], stats.SoPrimStorageNeeded[2], stats.SoPrimStorageNeeded[3],
606 stats.SoNumPrimsWritten[0], stats.SoNumPrimsWritten[1], stats.SoNumPrimsWritten[2], stats.SoNumPrimsWritten[3]
607 ));
608 AR_EVENT(FrontendDrawEndEvent(pDC->drawId));
609
610 pContext->pfnUpdateStatsFE(GetPrivateState(pDC), &stats);
611 }
612
613 if (pContext->pfnUpdateSoWriteOffset)
614 {
615 for (uint32_t i = 0; i < MAX_SO_BUFFERS; ++i)
616 {
617 if ((pDC->dynState.SoWriteOffsetDirty[i]) &&
618 (pDC->pState->state.soBuffer[i].soWriteEnable))
619 {
620 pContext->pfnUpdateSoWriteOffset(GetPrivateState(pDC), i, pDC->dynState.SoWriteOffset[i]);
621 }
622 }
623 }
624
625 // Ensure all streaming writes are globally visible before marking this FE done
626 _mm_mfence();
627 pDC->doneFE = true;
628
629 InterlockedDecrement(&pContext->drawsOutstandingFE);
630 }
631
632 void WorkOnFifoFE(SWR_CONTEXT *pContext, uint32_t workerId, uint32_t &curDrawFE)
633 {
634 // Try to grab the next DC from the ring
635 uint32_t drawEnqueued = GetEnqueuedDraw(pContext);
636 while (IDComparesLess(curDrawFE, drawEnqueued))
637 {
638 uint32_t dcSlot = curDrawFE % pContext->MAX_DRAWS_IN_FLIGHT;
639 DRAW_CONTEXT *pDC = &pContext->dcRing[dcSlot];
640 if (pDC->isCompute || pDC->doneFE)
641 {
642 CompleteDrawContextInl(pContext, workerId, pDC);
643 curDrawFE++;
644 }
645 else
646 {
647 break;
648 }
649 }
650
651 uint32_t lastRetiredFE = curDrawFE - 1;
652 uint32_t curDraw = curDrawFE;
653 while (IDComparesLess(curDraw, drawEnqueued))
654 {
655 uint32_t dcSlot = curDraw % pContext->MAX_DRAWS_IN_FLIGHT;
656 DRAW_CONTEXT *pDC = &pContext->dcRing[dcSlot];
657
658 if (!pDC->isCompute && !pDC->FeLock)
659 {
660 if (CheckDependencyFE(pContext, pDC, lastRetiredFE))
661 {
662 return;
663 }
664
665 uint32_t initial = InterlockedCompareExchange((volatile uint32_t*)&pDC->FeLock, 1, 0);
666 if (initial == 0)
667 {
668 // successfully grabbed the DC, now run the FE
669 pDC->FeWork.pfnWork(pContext, pDC, workerId, &pDC->FeWork.desc);
670
671 CompleteDrawFE(pContext, workerId, pDC);
672 }
673 }
674 curDraw++;
675 }
676 }
677
678 //////////////////////////////////////////////////////////////////////////
679 /// @brief If there is any compute work then go work on it.
680 /// @param pContext - pointer to SWR context.
681 /// @param workerId - The unique worker ID that is assigned to this thread.
682 /// @param curDrawBE - This tracks the draw contexts that this thread has processed. Each worker thread
683 /// has its own curDrawBE counter and this ensures that each worker processes all the
684 /// draws in order.
685 void WorkOnCompute(
686 SWR_CONTEXT *pContext,
687 uint32_t workerId,
688 uint32_t& curDrawBE)
689 {
690 uint32_t drawEnqueued = 0;
691 if (FindFirstIncompleteDraw(pContext, workerId, curDrawBE, drawEnqueued) == false)
692 {
693 return;
694 }
695
696 uint32_t lastRetiredDraw = pContext->dcRing[curDrawBE % pContext->MAX_DRAWS_IN_FLIGHT].drawId - 1;
697
698 for (uint64_t i = curDrawBE; IDComparesLess(i, drawEnqueued); ++i)
699 {
700 DRAW_CONTEXT *pDC = &pContext->dcRing[i % pContext->MAX_DRAWS_IN_FLIGHT];
701 if (pDC->isCompute == false) return;
702
703 // check dependencies
704 if (CheckDependency(pContext, pDC, lastRetiredDraw))
705 {
706 return;
707 }
708
709 SWR_ASSERT(pDC->pDispatch != nullptr);
710 DispatchQueue& queue = *pDC->pDispatch;
711
712 // Is there any work remaining?
713 if (queue.getNumQueued() > 0)
714 {
715 void* pSpillFillBuffer = nullptr;
716 void* pScratchSpace = nullptr;
717 uint32_t threadGroupId = 0;
718 while (queue.getWork(threadGroupId))
719 {
720 queue.dispatch(pDC, workerId, threadGroupId, pSpillFillBuffer, pScratchSpace);
721 queue.finishedWork();
722 }
723
724 // Ensure all streaming writes are globally visible before moving onto the next draw
725 _mm_mfence();
726 }
727 }
728 }
729
730 template<bool IsFEThread, bool IsBEThread>
731 DWORD workerThreadMain(LPVOID pData)
732 {
733 THREAD_DATA *pThreadData = (THREAD_DATA*)pData;
734 SWR_CONTEXT *pContext = pThreadData->pContext;
735 uint32_t threadId = pThreadData->threadId;
736 uint32_t workerId = pThreadData->workerId;
737
738 bindThread(pContext, threadId, pThreadData->procGroupId, pThreadData->forceBindProcGroup);
739
740 {
741 char threadName[64];
742 sprintf_s(threadName,
743 #if defined(_WIN32)
744 "SWRWorker_%02d_NUMA%d_Core%02d_T%d",
745 #else
746 // linux pthread name limited to 16 chars (including \0)
747 "w%03d-n%d-c%03d-t%d",
748 #endif
749 workerId, pThreadData->numaId, pThreadData->coreId, pThreadData->htId);
750 SetCurrentThreadName(threadName);
751 }
752
753 RDTSC_INIT(threadId);
754
755 uint32_t numaNode = pThreadData->numaId;
756 uint32_t numaMask = pContext->threadPool.numaMask;
757
758 // flush denormals to 0
759 _mm_setcsr(_mm_getcsr() | _MM_FLUSH_ZERO_ON | _MM_DENORMALS_ZERO_ON);
760
761 // Track tiles locked by other threads. If we try to lock a macrotile and find it's already
762 // locked, then we add it to this list so that we don't try to lock it again.
763 TileSet lockedTiles;
764
765 // Each worker can work on any of the queued draws as long as certain conditions are met.
766 // The data associated with a draw is guaranteed to be live as long as a worker hasn't
767 // signaled that it has moved on to the next draw, which it does once it determines there
768 // is no more work to do for the current one. The API thread will not increment the head
769 // of the dc ring until all workers have moved past the current head.
770 // The logic to determine what to work on is:
771 // 1- Try to work on the FE of any draw that is queued. For now there are no dependencies
772 //    on the FE work, so any worker can grab any FE and process it in parallel. Eventually
773 //    we'll need dependency tracking to force serialization on FEs. The worker claims an
774 //    FE with an atomic compare-and-swap on the draw context's FeLock and keeps scanning
775 //    until it reaches the most recently enqueued draw.
776 // 2- BE work must be done in strict order. We accomplish this today by pulling work off
777 //    the oldest draw (i.e. the head) of the dcRing. The worker can determine whether there
778 //    is any work left by comparing the total # of binned work items with the total # of
779 //    completed work items. If they are equal, there is no more work to do for this draw,
780 //    and the worker can safely increment its curDrawBE counter and move on to the next
781 //    draw.
782 std::unique_lock<std::mutex> lock(pContext->WaitLock, std::defer_lock);
783
784 auto threadHasWork = [&](uint32_t curDraw) { return curDraw != pContext->dcRing.GetHead(); };
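// A worker still has work while its local draw counter trails the ring head (the most
// recently enqueued draw).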
785
786 uint32_t curDrawBE = 0;
787 uint32_t curDrawFE = 0;
788
789 bool bShutdown = false;
790
791 while (true)
792 {
793 if (bShutdown && !threadHasWork(curDrawBE))
794 {
795 break;
796 }
797
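// Spin briefly looking for new work before falling back to sleeping on FifosNotEmpty,
// which avoids condition-variable wake-up latency between back-to-back draws.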
798 uint32_t loop = 0;
799 while (loop++ < KNOB_WORKER_SPIN_LOOP_COUNT && !threadHasWork(curDrawBE))
800 {
801 _mm_pause();
802 }
803
804 if (!threadHasWork(curDrawBE))
805 {
806 lock.lock();
807
808 // check for thread idle condition again under lock
809 if (threadHasWork(curDrawBE))
810 {
811 lock.unlock();
812 continue;
813 }
814
815 pContext->FifosNotEmpty.wait(lock);
816 lock.unlock();
817 }
818
819 if (IsBEThread)
820 {
821 AR_BEGIN(WorkerWorkOnFifoBE, 0);
822 bShutdown |= WorkOnFifoBE(pContext, workerId, curDrawBE, lockedTiles, numaNode, numaMask);
823 AR_END(WorkerWorkOnFifoBE, 0);
824
825 WorkOnCompute(pContext, workerId, curDrawBE);
826 }
827
828 if (IsFEThread)
829 {
830 WorkOnFifoFE(pContext, workerId, curDrawFE);
831
832 if (!IsBEThread)
833 {
834 curDrawBE = curDrawFE;
835 }
836 }
837 }
838
839 return 0;
840 }
841 template<> DWORD workerThreadMain<false, false>(LPVOID) = delete;
842
843 template <bool IsFEThread, bool IsBEThread>
844 DWORD workerThreadInit(LPVOID pData)
845 {
846 #if defined(_WIN32)
847 __try
848 #endif // _WIN32
849 {
850 return workerThreadMain<IsFEThread, IsBEThread>(pData);
851 }
852
853 #if defined(_WIN32)
854 __except(EXCEPTION_CONTINUE_SEARCH)
855 {
856 }
857
858 #endif // _WIN32
859
860 return 1;
861 }
862 template<> DWORD workerThreadInit<false, false>(LPVOID pData) = delete;
863
864 //////////////////////////////////////////////////////////////////////////
865 /// @brief Creates thread pool info but doesn't launch threads.
866 /// @param pContext - pointer to context
867 /// @param pPool - pointer to thread pool object.
868 void CreateThreadPool(SWR_CONTEXT* pContext, THREAD_POOL* pPool)
869 {
870 bindThread(pContext, 0);
871
872 CPUNumaNodes nodes;
873 uint32_t numThreadsPerProcGroup = 0;
874 CalculateProcessorTopology(nodes, numThreadsPerProcGroup);
875
876 uint32_t numHWNodes = (uint32_t)nodes.size();
877 uint32_t numHWCoresPerNode = (uint32_t)nodes[0].cores.size();
878 uint32_t numHWHyperThreads = (uint32_t)nodes[0].cores[0].threadIds.size();
879
880 // Calculate num HW threads. Due to asymmetric topologies, this is not
881 // a trivial multiplication.
882 uint32_t numHWThreads = 0;
883 for (auto& node : nodes)
884 {
885 for (auto& core : node.cores)
886 {
887 numHWThreads += (uint32_t)core.threadIds.size();
888 }
889 }
890
891 uint32_t numNodes = numHWNodes;
892 uint32_t numCoresPerNode = numHWCoresPerNode;
893 uint32_t numHyperThreads = numHWHyperThreads;
894
895 if (pContext->threadInfo.MAX_NUMA_NODES)
896 {
897 numNodes = std::min(numNodes, pContext->threadInfo.MAX_NUMA_NODES);
898 }
899
900 if (pContext->threadInfo.MAX_CORES_PER_NUMA_NODE)
901 {
902 numCoresPerNode = std::min(numCoresPerNode, pContext->threadInfo.MAX_CORES_PER_NUMA_NODE);
903 }
904
905 if (pContext->threadInfo.MAX_THREADS_PER_CORE)
906 {
907 numHyperThreads = std::min(numHyperThreads, pContext->threadInfo.MAX_THREADS_PER_CORE);
908 }
909
910 #if defined(_WIN32) && !defined(_WIN64)
911 if (!pContext->threadInfo.MAX_WORKER_THREADS)
912 {
913 // Limit 32-bit windows to bindable HW threads only
914 if ((numCoresPerNode * numHWHyperThreads) > 32)
915 {
916 numCoresPerNode = 32 / numHWHyperThreads;
917 }
918 }
919 #endif
920
921 // Calculate numThreads
922 uint32_t numThreads = numNodes * numCoresPerNode * numHyperThreads;
923 numThreads = std::min(numThreads, numHWThreads);
924
925 if (pContext->threadInfo.MAX_WORKER_THREADS)
926 {
927 uint32_t maxHWThreads = numHWNodes * numHWCoresPerNode * numHWHyperThreads;
928 numThreads = std::min(pContext->threadInfo.MAX_WORKER_THREADS, maxHWThreads);
929 }
930
931 uint32_t numAPIReservedThreads = 1;
932
933
934 if (numThreads == 1)
935 {
936 // If there is only 1 worker thread, try to move it to an available
937 // HW thread. If that fails, fall back to running single-threaded on the API thread.
938 if (numCoresPerNode < numHWCoresPerNode)
939 {
940 numCoresPerNode++;
941 }
942 else if (numHyperThreads < numHWHyperThreads)
943 {
944 numHyperThreads++;
945 }
946 else if (numNodes < numHWNodes)
947 {
948 numNodes++;
949 }
950 else
951 {
952 pContext->threadInfo.SINGLE_THREADED = true;
953 }
954 }
955 else
956 {
957 // Save HW threads for the API if we can
958 if (numThreads > numAPIReservedThreads)
959 {
960 numThreads -= numAPIReservedThreads;
961 }
962 else
963 {
964 numAPIReservedThreads = 0;
965 }
966 }
967
968 if (pContext->threadInfo.SINGLE_THREADED)
969 {
970 numThreads = 1;
971 }
972
973 // Initialize DRAW_CONTEXT's per-thread stats
974 for (uint32_t dc = 0; dc < pContext->MAX_DRAWS_IN_FLIGHT; ++dc)
975 {
976 pContext->dcRing[dc].dynState.pStats = (SWR_STATS*)AlignedMalloc(sizeof(SWR_STATS) * numThreads, 64);
977 memset(pContext->dcRing[dc].dynState.pStats, 0, sizeof(SWR_STATS) * numThreads);
978 }
979
980 if (pContext->threadInfo.SINGLE_THREADED)
981 {
982 pContext->NumWorkerThreads = 1;
983 pContext->NumFEThreads = 1;
984 pContext->NumBEThreads = 1;
985 pPool->numThreads = 0;
986
987 return;
988 }
989
990 pPool->numThreads = numThreads;
991 pContext->NumWorkerThreads = pPool->numThreads;
992
993 pPool->pThreadData = (THREAD_DATA *)malloc(pPool->numThreads * sizeof(THREAD_DATA));
994 pPool->numaMask = 0;
995
996 pPool->pThreads = new THREAD_PTR[pPool->numThreads];
997
998 if (pContext->threadInfo.MAX_WORKER_THREADS)
999 {
1000 bool bForceBindProcGroup = (numThreads > numThreadsPerProcGroup);
1001 uint32_t numProcGroups = (numThreads + numThreadsPerProcGroup - 1) / numThreadsPerProcGroup;
1002 // When MAX_WORKER_THREADS is set we don't bother to bind to specific HW threads,
1003 // but Windows still requires binding to specific processor groups.
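// e.g. if KNOB_MAX_WORKER_THREADS requests 128 workers on a machine reporting 64 threads
// per processor group, workers are spread round-robin across two groups and bindThread
// binds them only at the processor-group level rather than to individual HW threads.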
1004 for (uint32_t workerId = 0; workerId < numThreads; ++workerId)
1005 {
1006 pPool->pThreadData[workerId].workerId = workerId;
1007 pPool->pThreadData[workerId].procGroupId = workerId % numProcGroups;
1008 pPool->pThreadData[workerId].threadId = 0;
1009 pPool->pThreadData[workerId].numaId = 0;
1010 pPool->pThreadData[workerId].coreId = 0;
1011 pPool->pThreadData[workerId].htId = 0;
1012 pPool->pThreadData[workerId].pContext = pContext;
1013 pPool->pThreadData[workerId].forceBindProcGroup = bForceBindProcGroup;
1014
1015 pContext->NumBEThreads++;
1016 pContext->NumFEThreads++;
1017 }
1018 }
1019 else
1020 {
1021 // numa distribution assumes workers on all nodes
1022 bool useNuma = true;
1023 if (numCoresPerNode * numHyperThreads == 1)
1024 useNuma = false;
1025
1026 if (useNuma) {
1027 pPool->numaMask = numNodes - 1; // Only works for 2**n numa nodes (1, 2, 4, etc.)
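// e.g. with 2 NUMA nodes numaMask == 1, and WorkOnFifoBE's ((x ^ y) & numaMask) test
// alternates macrotiles between the two nodes' workers.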
1028 } else {
1029 pPool->numaMask = 0;
1030 }
1031
1032 uint32_t workerId = 0;
1033 for (uint32_t n = 0; n < numNodes; ++n)
1034 {
1035 auto& node = nodes[n];
1036 uint32_t numCores = numCoresPerNode;
1037 for (uint32_t c = 0; c < numCores; ++c)
1038 {
1039 if (c >= node.cores.size())
1040 {
1041 break;
1042 }
1043
1044 auto& core = node.cores[c];
1045 for (uint32_t t = 0; t < numHyperThreads; ++t)
1046 {
1047 if (t >= core.threadIds.size())
1048 {
1049 break;
1050 }
1051
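// Skip the HW thread(s) reserved above for the API thread.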
1052 if (numAPIReservedThreads)
1053 {
1054 --numAPIReservedThreads;
1055 continue;
1056 }
1057
1058 SWR_ASSERT(workerId < numThreads);
1059
1060 pPool->pThreadData[workerId].workerId = workerId;
1061 pPool->pThreadData[workerId].procGroupId = core.procGroup;
1062 pPool->pThreadData[workerId].threadId = core.threadIds[t];
1063 pPool->pThreadData[workerId].numaId = useNuma ? n : 0;
1064 pPool->pThreadData[workerId].coreId = c;
1065 pPool->pThreadData[workerId].htId = t;
1066 pPool->pThreadData[workerId].pContext = pContext;
1067
1068 pContext->NumBEThreads++;
1069 pContext->NumFEThreads++;
1070
1071 ++workerId;
1072 }
1073 }
1074 }
1075 SWR_ASSERT(workerId == pContext->NumWorkerThreads);
1076 }
1077 }
1078
1079 //////////////////////////////////////////////////////////////////////////
1080 /// @brief Launches worker threads in thread pool.
1081 /// @param pContext - pointer to context
1082 /// @param pPool - pointer to thread pool object.
1083 void StartThreadPool(SWR_CONTEXT* pContext, THREAD_POOL* pPool)
1084 {
1085 if (pContext->threadInfo.SINGLE_THREADED)
1086 {
1087 return;
1088 }
1089
1090 for (uint32_t workerId = 0; workerId < pContext->NumWorkerThreads; ++workerId)
1091 {
1092 pPool->pThreads[workerId] = new std::thread(workerThreadInit<true, true>, &pPool->pThreadData[workerId]);
1093 }
1094 }
1095
1096 //////////////////////////////////////////////////////////////////////////
1097 /// @brief Destroys thread pool.
1098 /// @param pContext - pointer to context
1099 /// @param pPool - pointer to thread pool object.
1100 void DestroyThreadPool(SWR_CONTEXT *pContext, THREAD_POOL *pPool)
1101 {
1102 if (!pContext->threadInfo.SINGLE_THREADED)
1103 {
1104 // Wait for all threads to finish
1105 SwrWaitForIdle(pContext);
1106
1107 // Wait for threads to finish and destroy them
1108 for (uint32_t t = 0; t < pPool->numThreads; ++t)
1109 {
1110 // Detach from the thread. We cannot join() because, on Windows, code in some
1111 // DllMain (DLL_THREAD_DETACH case) could block the thread until after this returns.
1112 pPool->pThreads[t]->detach();
1113 delete(pPool->pThreads[t]);
1114 }
1115
1116 delete [] pPool->pThreads;
1117
1118 // Clean up data used by threads
1119 free(pPool->pThreadData);
1120 }
1121 }