swr: [rasterizer core] fundamentally change how stats work
mesa.git: src/gallium/drivers/swr/rasterizer/core/threads.cpp
1 /****************************************************************************
2 * Copyright (C) 2014-2016 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 ****************************************************************************/
23
24 #include <stdio.h>
25 #include <thread>
26 #include <algorithm>
27 #include <float.h>
28 #include <vector>
29 #include <utility>
30 #include <fstream>
31 #include <string>
32
33 #if defined(__linux__) || defined(__gnu_linux__)
34 #include <pthread.h>
35 #include <sched.h>
36 #include <unistd.h>
37 #endif
38
39 #include "common/os.h"
40 #include "context.h"
41 #include "frontend.h"
42 #include "backend.h"
43 #include "rasterizer.h"
44 #include "rdtsc_core.h"
45 #include "tilemgr.h"
46
47
48
49
50 // ThreadId
51 struct Core
52 {
53 uint32_t procGroup = 0;
54 std::vector<uint32_t> threadIds;
55 };
56
57 struct NumaNode
58 {
59 std::vector<Core> cores;
60 };
61
62 typedef std::vector<NumaNode> CPUNumaNodes;
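// Illustrative example (not part of the upstream file): on a hypothetical 2-socket
// machine with 2 cores per socket and 2 HW threads per core, CalculateProcessorTopology()
// below would yield nodes.size() == 2, each NumaNode holding 2 Cores, and each Core
// holding 2 entries in threadIds -- one NumaNode per physical package, one Core per
// "core id", and one threadId per logical processor.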
63
64 void CalculateProcessorTopology(CPUNumaNodes& out_nodes, uint32_t& out_numThreadsPerProcGroup)
65 {
66 out_nodes.clear();
67 out_numThreadsPerProcGroup = 0;
68
69 #if defined(_WIN32)
70
71 std::vector<KAFFINITY> threadMaskPerProcGroup;
72
73 static std::mutex m;
74 std::lock_guard<std::mutex> l(m);
75
76 static SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX buffer[KNOB_MAX_NUM_THREADS];
77 DWORD bufSize = sizeof(buffer);
78
79 BOOL ret = GetLogicalProcessorInformationEx(RelationProcessorCore, buffer, &bufSize);
80 SWR_ASSERT(ret != FALSE, "Failed to get Processor Topology Information");
81
82 uint32_t count = bufSize / buffer->Size;
83 PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBuffer = buffer;
84
85 for (uint32_t i = 0; i < count; ++i)
86 {
87 SWR_ASSERT(pBuffer->Relationship == RelationProcessorCore);
88 for (uint32_t g = 0; g < pBuffer->Processor.GroupCount; ++g)
89 {
90 auto& gmask = pBuffer->Processor.GroupMask[g];
91 uint32_t threadId = 0;
92 uint32_t procGroup = gmask.Group;
93
94 Core* pCore = nullptr;
95
96 uint32_t numThreads = (uint32_t)_mm_popcount_sizeT(gmask.Mask);
97
98 while (BitScanForwardSizeT((unsigned long*)&threadId, gmask.Mask))
99 {
100 // clear mask
101 KAFFINITY threadMask = KAFFINITY(1) << threadId;
102 gmask.Mask &= ~threadMask;
103
104 if (procGroup >= threadMaskPerProcGroup.size())
105 {
106 threadMaskPerProcGroup.resize(procGroup + 1);
107 }
108
109 if (threadMaskPerProcGroup[procGroup] & threadMask)
110 {
111 // Already seen this mask. This means that we are in 32-bit mode and
112 // have seen more than 32 HW threads for this procGroup
113 // Don't use it
114 #if defined(_WIN64)
115 SWR_ASSERT(false, "Shouldn't get here in 64-bit mode");
116 #endif
117 continue;
118 }
119
120 threadMaskPerProcGroup[procGroup] |= (KAFFINITY(1) << threadId);
121
122 // Find Numa Node
123 uint32_t numaId = 0;
124 PROCESSOR_NUMBER procNum = {};
125 procNum.Group = WORD(procGroup);
126 procNum.Number = UCHAR(threadId);
127
128 ret = GetNumaProcessorNodeEx(&procNum, (PUSHORT)&numaId);
129 SWR_ASSERT(ret);
130
131 // Store data
132 if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
133 auto& numaNode = out_nodes[numaId];
134
135 uint32_t coreId = 0;
136
137 if (nullptr == pCore)
138 {
139 numaNode.cores.push_back(Core());
140 pCore = &numaNode.cores.back();
141 pCore->procGroup = procGroup;
142 }
143 pCore->threadIds.push_back(threadId);
144 if (procGroup == 0)
145 {
146 out_numThreadsPerProcGroup++;
147 }
148 }
149 }
150 pBuffer = PtrAdd(pBuffer, pBuffer->Size);
151 }
152
153
154 #elif defined(__linux__) || defined (__gnu_linux__)
155
156 // Parse /proc/cpuinfo to get full topology
157 std::ifstream input("/proc/cpuinfo");
158 std::string line;
159 char* c;
160 uint32_t threadId = uint32_t(-1);
161 uint32_t coreId = uint32_t(-1);
162 uint32_t numaId = uint32_t(-1);
163
164 while (std::getline(input, line))
165 {
166 if (line.find("processor") != std::string::npos)
167 {
168 if (threadId != uint32_t(-1))
169 {
170 // Save information.
171 if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
172 auto& numaNode = out_nodes[numaId];
173 if (numaNode.cores.size() <= coreId) numaNode.cores.resize(coreId + 1);
174 auto& core = numaNode.cores[coreId];
175
176 core.procGroup = coreId;
177 core.threadIds.push_back(threadId);
178
179 out_numThreadsPerProcGroup++;
180 }
181
182 auto data_start = line.find(": ") + 2;
183 threadId = std::strtoul(&line.c_str()[data_start], &c, 10);
184 continue;
185 }
186 if (line.find("core id") != std::string::npos)
187 {
188 auto data_start = line.find(": ") + 2;
189 coreId = std::strtoul(&line.c_str()[data_start], &c, 10);
190 continue;
191 }
192 if (line.find("physical id") != std::string::npos)
193 {
194 auto data_start = line.find(": ") + 2;
195 numaId = std::strtoul(&line.c_str()[data_start], &c, 10);
196 continue;
197 }
198 }
199
200 if (threadId != uint32_t(-1))
201 {
202 // Save information.
203 if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
204 auto& numaNode = out_nodes[numaId];
205 if (numaNode.cores.size() <= coreId) numaNode.cores.resize(coreId + 1);
206 auto& core = numaNode.cores[coreId];
207
208 core.procGroup = coreId;
209 core.threadIds.push_back(threadId);
210 out_numThreadsPerProcGroup++;
211 }
212
213 for (uint32_t node = 0; node < out_nodes.size(); node++) {
214 auto& numaNode = out_nodes[node];
215 auto it = numaNode.cores.begin();
216 for ( ; it != numaNode.cores.end(); ) {
217 if (it->threadIds.size() == 0)
218 it = numaNode.cores.erase(it); // erase() invalidates 'it'; continue from the returned iterator
219 else
220 ++it;
221 }
222 }
223
224 #else
225
226 #error Unsupported platform
227
228 #endif
229 }
230
231
232 void bindThread(SWR_CONTEXT* pContext, uint32_t threadId, uint32_t procGroupId = 0, bool bindProcGroup=false)
233 {
234 // Only bind threads when MAX_WORKER_THREADS isn't set, unless we were explicitly asked to bind to a proc group.
235 if (pContext->threadInfo.MAX_WORKER_THREADS && bindProcGroup == false)
236 {
237 return;
238 }
239
240 #if defined(_WIN32)
241
242 GROUP_AFFINITY affinity = {};
243 affinity.Group = procGroupId;
244
245 #if !defined(_WIN64)
246 if (threadId >= 32)
247 {
248 // Hopefully we don't get here. Logic in CreateThreadPool should prevent this.
249 SWR_REL_ASSERT(false, "Shouldn't get here");
250
251 // In a 32-bit process on Windows it is impossible to bind
252 // to logical processors 32-63 within a processor group.
253 // In this case set the mask to 0 and let the system assign
254 // the processor. Hopefully it will make smart choices.
255 affinity.Mask = 0;
256 }
257 else
258 #endif
259 {
260 // If MAX_WORKER_THREADS is set, only bind to the proc group,
261 // not the individual HW thread.
262 if (!pContext->threadInfo.MAX_WORKER_THREADS)
263 {
264 affinity.Mask = KAFFINITY(1) << threadId;
265 }
266 }
267
268 SetThreadGroupAffinity(GetCurrentThread(), &affinity, nullptr);
269
270 #else
271
272 cpu_set_t cpuset;
273 pthread_t thread = pthread_self();
274 CPU_ZERO(&cpuset);
275 CPU_SET(threadId, &cpuset);
276
277 pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
278
279 #endif
280 }
281
282 INLINE
283 uint32_t GetEnqueuedDraw(SWR_CONTEXT *pContext)
284 {
285 return pContext->dcRing.GetHead();
286 }
287
288 INLINE
289 DRAW_CONTEXT *GetDC(SWR_CONTEXT *pContext, uint32_t drawId)
290 {
291 return &pContext->dcRing[(drawId-1) % KNOB_MAX_DRAWS_IN_FLIGHT];
292 }
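// Note: drawId appears to be 1-based here (hence the drawId - 1); the modulo simply
// wraps the id into the fixed-size ring of KNOB_MAX_DRAWS_IN_FLIGHT draw contexts.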
293
294 INLINE
295 bool IDComparesLess(uint32_t a, uint32_t b)
296 {
297 // Use signed delta to ensure that wrap-around to 0 is correctly handled.
298 int32_t delta = int32_t(a - b);
299 return (delta < 0);
300 }
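// Worked example of the wrap-around handling (illustrative only): with a = 2 and
// b = 0xFFFFFFFEu (a draw issued just before the 32-bit id wrapped), uint32_t(a - b) == 4,
// so int32_t(a - b) is +4 and IDComparesLess(b, a) is true -- b is still treated as the
// older draw even though it is numerically larger.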
301
302 // returns true if dependency not met
303 INLINE
304 bool CheckDependency(SWR_CONTEXT *pContext, DRAW_CONTEXT *pDC, uint32_t lastRetiredDraw)
305 {
306 return pDC->dependent && IDComparesLess(lastRetiredDraw, pDC->drawId - 1);
307 }
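// In other words: a dependent draw N can only proceed once draw N-1 has retired,
// i.e. once lastRetiredDraw has caught up to pDC->drawId - 1.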
308
309 //////////////////////////////////////////////////////////////////////////
310 /// @brief Update client stats.
311 INLINE void UpdateClientStats(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
312 {
313 if ((pContext->pfnUpdateStats == nullptr) || (GetApiState(pDC).enableStats == false))
314 {
315 return;
316 }
317
318 DRAW_DYNAMIC_STATE& dynState = pDC->dynState;
319 SWR_STATS stats{ 0 };
320
321 // Sum up stats across all workers before sending to client.
322 for (uint32_t i = 0; i < pContext->NumWorkerThreads; ++i)
323 {
324 stats.DepthPassCount += dynState.stats[i].DepthPassCount;
325 stats.IaVertices += dynState.stats[i].IaVertices;
326 stats.IaPrimitives += dynState.stats[i].IaPrimitives;
327 stats.VsInvocations += dynState.stats[i].VsInvocations;
328 stats.HsInvocations += dynState.stats[i].HsInvocations;
329 stats.DsInvocations += dynState.stats[i].DsInvocations;
330 stats.GsInvocations += dynState.stats[i].GsInvocations;
331 stats.PsInvocations += dynState.stats[i].PsInvocations;
332 stats.CInvocations += dynState.stats[i].CInvocations;
333 stats.CsInvocations += dynState.stats[i].CsInvocations;
334 stats.CPrimitives += dynState.stats[i].CPrimitives;
335 stats.GsPrimitives += dynState.stats[i].GsPrimitives;
336
337 for (uint32_t stream = 0; stream < MAX_SO_STREAMS; ++stream)
338 {
339 stats.SoPrimStorageNeeded[stream] += dynState.stats[i].SoPrimStorageNeeded[stream];
340 stats.SoNumPrimsWritten[stream] += dynState.stats[i].SoNumPrimsWritten[stream];
341 }
342 }
343
344 pContext->pfnUpdateStats(GetPrivateState(pDC), &stats);
345 }
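// Note: stats are accumulated per worker thread in the draw context's DRAW_DYNAMIC_STATE
// and only summed here when the draw retires, which keeps the hot paths free of shared
// atomic counters at the cost of one stats entry per worker per draw context.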
346
347 INLINE void ExecuteCallbacks(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
348 {
349 UpdateClientStats(pContext, pDC);
350
351 if (pDC->retireCallback.pfnCallbackFunc)
352 {
353 pDC->retireCallback.pfnCallbackFunc(pDC->retireCallback.userData,
354 pDC->retireCallback.userData2,
355 pDC->retireCallback.userData3);
356 }
357 }
358
359 // inlined-only version
360 INLINE int32_t CompleteDrawContextInl(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
361 {
362 int32_t result = InterlockedDecrement((volatile LONG*)&pDC->threadsDone);
363 SWR_ASSERT(result >= 0);
364
365 if (result == 0)
366 {
367 ExecuteCallbacks(pContext, pDC);
368
369 // Cleanup memory allocations
370 pDC->pArena->Reset(true);
371 if (!pDC->isCompute)
372 {
373 pDC->pTileMgr->initialize();
374 }
375 if (pDC->cleanupState)
376 {
377 pDC->pState->pArena->Reset(true);
378 }
379
380 _ReadWriteBarrier();
381
382 pContext->dcRing.Dequeue(); // Remove from tail
383 }
384
385 return result;
386 }
387
388 // available to other translation modules
389 int32_t CompleteDrawContext(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
390 {
391 return CompleteDrawContextInl(pContext, pDC);
392 }
393
394 INLINE bool FindFirstIncompleteDraw(SWR_CONTEXT* pContext, uint32_t& curDrawBE, uint32_t& drawEnqueued)
395 {
396 // increment our current draw id to the first incomplete draw
397 drawEnqueued = GetEnqueuedDraw(pContext);
398 while (IDComparesLess(curDrawBE, drawEnqueued))
399 {
400 DRAW_CONTEXT *pDC = &pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT];
401
402 // If it's not compute and the FE is not done, then break out of the loop.
403 if (!pDC->doneFE && !pDC->isCompute) break;
404
405 bool isWorkComplete = pDC->isCompute ?
406 pDC->pDispatch->isWorkComplete() :
407 pDC->pTileMgr->isWorkComplete();
408
409 if (isWorkComplete)
410 {
411 curDrawBE++;
412 CompleteDrawContextInl(pContext, pDC);
413 }
414 else
415 {
416 break;
417 }
418 }
419
420 // If there are no more incomplete draws then return false.
421 return IDComparesLess(curDrawBE, drawEnqueued);
422 }
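// Illustrative example: if the two oldest draws tracked by curDrawBE have finished all of
// their BE work and the third still has work queued, the loop above completes and skips
// past the first two and returns true with curDrawBE pointing at the third; it returns
// false only when every enqueued draw has been retired.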
423
424 //////////////////////////////////////////////////////////////////////////
425 /// @brief If there is any BE work then go work on it.
426 /// @param pContext - pointer to SWR context.
427 /// @param workerId - The unique worker ID that is assigned to this thread.
428 /// @param curDrawBE - This tracks the draw contexts that this thread has processed. Each worker thread
429 /// has its own curDrawBE counter and this ensures that each worker processes all the
430 /// draws in order.
431 /// @param lockedTiles - This is the set of tiles locked by other threads. Each thread maintains its
432 /// own set and each time it fails to lock a macrotile, because it's already locked,
433 /// then it will add that tile to the lockedTiles set. As a worker begins to work
434 /// on future draws, the lockedTiles set ensures that it doesn't work on tiles that may
435 /// still have work pending in a previous draw. Additionally, lockedTiles is a
436 /// heuristic that can steer a worker back to the same macrotile that it had been
437 /// working on in a previous draw.
438 void WorkOnFifoBE(
439 SWR_CONTEXT *pContext,
440 uint32_t workerId,
441 uint32_t &curDrawBE,
442 TileSet& lockedTiles,
443 uint32_t numaNode,
444 uint32_t numaMask)
445 {
446 // Find the first incomplete draw that has pending work. If no such draw is found then
447 // return. FindFirstIncompleteDraw is responsible for incrementing the curDrawBE.
448 uint32_t drawEnqueued = 0;
449 if (FindFirstIncompleteDraw(pContext, curDrawBE, drawEnqueued) == false)
450 {
451 return;
452 }
453
454 uint32_t lastRetiredDraw = pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT].drawId - 1;
455
456 // Reset our history for locked tiles. We'll have to re-learn which tiles are locked.
457 lockedTiles.clear();
458
459 // Try to work on each draw in order of the available draws in flight.
460 // 1. If we're on curDrawBE, we can work on any macrotile that is available.
461 // 2. If we're trying to work on draws after curDrawBE, we are restricted to
462 // working on those macrotiles that are known to be complete in the prior draw to
463 // maintain order. The locked tiles provide the history to ensure this.
464 for (uint32_t i = curDrawBE; IDComparesLess(i, drawEnqueued); ++i)
465 {
466 DRAW_CONTEXT *pDC = &pContext->dcRing[i % KNOB_MAX_DRAWS_IN_FLIGHT];
467
468 if (pDC->isCompute) return; // We don't look at compute work.
469
470 // First wait for the FE to be finished with this draw. This keeps the threading model simple,
471 // but if there are lots of bubbles between draws then serializing FE and BE may
472 // need to be revisited.
473 if (!pDC->doneFE) return;
474
475 // If this draw is dependent on a previous draw then we need to bail.
476 if (CheckDependency(pContext, pDC, lastRetiredDraw))
477 {
478 return;
479 }
480
481 // Grab the list of all dirty macrotiles. A tile is dirty if it has work queued to it.
482 std::vector<uint32_t> &macroTiles = pDC->pTileMgr->getDirtyTiles();
483
484 for (uint32_t tileID : macroTiles)
485 {
486 // Only work on tiles for this numa node
487 uint32_t x, y;
488 pDC->pTileMgr->getTileIndices(tileID, x, y);
489 if (((x ^ y) & numaMask) != numaNode)
490 {
491 continue;
492 }
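// Illustrative example: with two NUMA nodes numaMask == 1, so this keeps only the
// macrotiles whose (x ^ y) parity matches this worker's node -- a simple checkerboard
// split of macrotiles across nodes.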
493
494 MacroTileQueue &tile = pDC->pTileMgr->getMacroTileQueue(tileID);
495
496 if (!tile.getNumQueued())
497 {
498 continue;
499 }
500
501 // can only work on this draw if it's not in use by other threads
502 if (lockedTiles.find(tileID) != lockedTiles.end())
503 {
504 continue;
505 }
506
507 if (tile.tryLock())
508 {
509 BE_WORK *pWork;
510
511 RDTSC_START(WorkerFoundWork);
512
513 uint32_t numWorkItems = tile.getNumQueued();
514 SWR_ASSERT(numWorkItems);
515
516 pWork = tile.peek();
517 SWR_ASSERT(pWork);
518 if (pWork->type == DRAW)
519 {
520 pContext->pHotTileMgr->InitializeHotTiles(pContext, pDC, tileID);
521 }
522
523 while ((pWork = tile.peek()) != nullptr)
524 {
525 pWork->pfnWork(pDC, workerId, tileID, &pWork->desc);
526 tile.dequeue();
527 }
528 RDTSC_STOP(WorkerFoundWork, numWorkItems, pDC->drawId);
529
530 _ReadWriteBarrier();
531
532 pDC->pTileMgr->markTileComplete(tileID);
533
534 // Optimization: If the draw is complete and we're the last one to have worked on it then
535 // we can reset the locked list, since all draws up to and including this one are guaranteed to be complete.
536 if ((curDrawBE == i) && pDC->pTileMgr->isWorkComplete())
537 {
538 // We can increment the current BE and safely move to next draw since we know this draw is complete.
539 curDrawBE++;
540 CompleteDrawContextInl(pContext, pDC);
541
542 lastRetiredDraw++;
543
544 lockedTiles.clear();
545 break;
546 }
547 }
548 else
549 {
550 // This tile is already locked. So let's add it to our locked tiles set. This way we don't try locking this one again.
551 lockedTiles.insert(tileID);
552 }
553 }
554 }
555 }
556
557 //////////////////////////////////////////////////////////////////////////
558 /// @brief Called when FE work is complete for this DC.
559 INLINE void CompleteDrawFE(SWR_CONTEXT* pContext, DRAW_CONTEXT* pDC)
560 {
561 _ReadWriteBarrier();
562
563 if (pContext->pfnUpdateSoWriteOffset)
564 {
565 for (uint32_t i = 0; i < MAX_SO_BUFFERS; ++i)
566 {
567 if ((pDC->dynState.SoWriteOffsetDirty[i]) &&
568 (pDC->pState->state.soBuffer[i].soWriteEnable))
569 {
570 pContext->pfnUpdateSoWriteOffset(GetPrivateState(pDC), i, pDC->dynState.SoWriteOffset[i]);
571 }
572 }
573 }
574
575 pDC->doneFE = true;
576
577 InterlockedDecrement((volatile LONG*)&pContext->drawsOutstandingFE);
578 }
579
580 void WorkOnFifoFE(SWR_CONTEXT *pContext, uint32_t workerId, uint32_t &curDrawFE)
581 {
582 // Try to grab the next DC from the ring
583 uint32_t drawEnqueued = GetEnqueuedDraw(pContext);
584 while (IDComparesLess(curDrawFE, drawEnqueued))
585 {
586 uint32_t dcSlot = curDrawFE % KNOB_MAX_DRAWS_IN_FLIGHT;
587 DRAW_CONTEXT *pDC = &pContext->dcRing[dcSlot];
588 if (pDC->isCompute || pDC->doneFE || pDC->FeLock)
589 {
590 CompleteDrawContextInl(pContext, pDC);
591 curDrawFE++;
592 }
593 else
594 {
595 break;
596 }
597 }
598
599 uint32_t curDraw = curDrawFE;
600 while (IDComparesLess(curDraw, drawEnqueued))
601 {
602 uint32_t dcSlot = curDraw % KNOB_MAX_DRAWS_IN_FLIGHT;
603 DRAW_CONTEXT *pDC = &pContext->dcRing[dcSlot];
604
605 if (!pDC->isCompute && !pDC->FeLock)
606 {
607 uint32_t initial = InterlockedCompareExchange((volatile uint32_t*)&pDC->FeLock, 1, 0);
608 if (initial == 0)
609 {
610 // successfully grabbed the DC, now run the FE
611 pDC->FeWork.pfnWork(pContext, pDC, workerId, &pDC->FeWork.desc);
612
613 CompleteDrawFE(pContext, pDC);
614 }
615 }
616 curDraw++;
617 }
618 }
619
620 //////////////////////////////////////////////////////////////////////////
621 /// @brief If there is any compute work then go work on it.
622 /// @param pContext - pointer to SWR context.
623 /// @param workerId - The unique worker ID that is assigned to this thread.
624 /// @param curDrawBE - This tracks the draw contexts that this thread has processed. Each worker thread
625 /// has its own curDrawBE counter and this ensures that each worker processes all the
626 /// draws in order.
627 void WorkOnCompute(
628 SWR_CONTEXT *pContext,
629 uint32_t workerId,
630 uint32_t& curDrawBE)
631 {
632 uint32_t drawEnqueued = 0;
633 if (FindFirstIncompleteDraw(pContext, curDrawBE, drawEnqueued) == false)
634 {
635 return;
636 }
637
638 uint32_t lastRetiredDraw = pContext->dcRing[curDrawBE % KNOB_MAX_DRAWS_IN_FLIGHT].drawId - 1;
639
640 for (uint32_t i = curDrawBE; IDComparesLess(i, drawEnqueued); ++i)
641 {
642 DRAW_CONTEXT *pDC = &pContext->dcRing[i % KNOB_MAX_DRAWS_IN_FLIGHT];
643 if (pDC->isCompute == false) return;
644
645 // check dependencies
646 if (CheckDependency(pContext, pDC, lastRetiredDraw))
647 {
648 return;
649 }
650
651 SWR_ASSERT(pDC->pDispatch != nullptr);
652 DispatchQueue& queue = *pDC->pDispatch;
653
654 // Is there any work remaining?
655 if (queue.getNumQueued() > 0)
656 {
657 void* pSpillFillBuffer = nullptr;
658 uint32_t threadGroupId = 0;
659 while (queue.getWork(threadGroupId))
660 {
661 ProcessComputeBE(pDC, workerId, threadGroupId, pSpillFillBuffer);
662
663 queue.finishedWork();
664 }
665 }
666 }
667 }
668
669 template<bool IsFEThread, bool IsBEThread>
670 DWORD workerThreadMain(LPVOID pData)
671 {
672 THREAD_DATA *pThreadData = (THREAD_DATA*)pData;
673 SWR_CONTEXT *pContext = pThreadData->pContext;
674 uint32_t threadId = pThreadData->threadId;
675 uint32_t workerId = pThreadData->workerId;
676
677 bindThread(pContext, threadId, pThreadData->procGroupId, pThreadData->forceBindProcGroup);
678
679 RDTSC_INIT(threadId);
680
681 uint32_t numaNode = pThreadData->numaId;
682 uint32_t numaMask = pContext->threadPool.numaMask;
683
684 // flush denormals to 0
685 _mm_setcsr(_mm_getcsr() | _MM_FLUSH_ZERO_ON | _MM_DENORMALS_ZERO_ON);
686
687 // Track tiles locked by other threads. If we try to lock a macrotile and find it's already
688 // locked, then we'll add it to this list so that we don't try to lock it again.
689 TileSet lockedTiles;
690
691 // Each worker has the ability to work on any of the queued draws as long as certain
692 // conditions are met. The data associated
693 // with a draw is guaranteed to be active as long as a worker hasn't signaled that it
694 // has moved on to the next draw after determining there is no more work to do. The API
695 // thread will not increment the head of the DC ring until all workers have moved past the
696 // current head.
697 // The logic to determine what to work on is:
698 // 1- Try to work on the FE of any draw that is queued. For now there are no dependencies
699 // on the FE work, so any worker can grab any FE and process it in parallel. Eventually
700 // we'll need dependency tracking to force serialization on FEs. The worker will try
701 // to pick an FE by atomically incrementing a counter in the SWR context. It'll keep
702 // trying until it reaches the tail.
703 // 2- BE work must be done in strict order. We accomplish this today by pulling work off
704 // the oldest draw (i.e. the head) of the dcRing. The worker can determine if there is
705 // any work left by comparing the total # of binned work items and the total # of completed
706 // work items. If they are equal, then there is no more work to do for this draw, and
707 // the worker can safely increment its oldestDraw counter and move on to the next draw.
708 std::unique_lock<std::mutex> lock(pContext->WaitLock, std::defer_lock);
709
710 auto threadHasWork = [&](uint32_t curDraw) { return curDraw != pContext->dcRing.GetHead(); };
711
712 uint32_t curDrawBE = 0;
713 uint32_t curDrawFE = 0;
714
715 while (pContext->threadPool.inThreadShutdown == false)
716 {
717 uint32_t loop = 0;
718 while (loop++ < KNOB_WORKER_SPIN_LOOP_COUNT && !threadHasWork(curDrawBE))
719 {
720 _mm_pause();
721 }
722
723 if (!threadHasWork(curDrawBE))
724 {
725 lock.lock();
726
727 // check for thread idle condition again under lock
728 if (threadHasWork(curDrawBE))
729 {
730 lock.unlock();
731 continue;
732 }
733
734 if (pContext->threadPool.inThreadShutdown)
735 {
736 lock.unlock();
737 break;
738 }
739
740 RDTSC_START(WorkerWaitForThreadEvent);
741
742 pContext->FifosNotEmpty.wait(lock);
743 lock.unlock();
744
745 RDTSC_STOP(WorkerWaitForThreadEvent, 0, 0);
746
747 if (pContext->threadPool.inThreadShutdown)
748 {
749 break;
750 }
751 }
752
753 if (IsBEThread)
754 {
755 RDTSC_START(WorkerWorkOnFifoBE);
756 WorkOnFifoBE(pContext, workerId, curDrawBE, lockedTiles, numaNode, numaMask);
757 RDTSC_STOP(WorkerWorkOnFifoBE, 0, 0);
758
759 WorkOnCompute(pContext, workerId, curDrawBE);
760 }
761
762 if (IsFEThread)
763 {
764 WorkOnFifoFE(pContext, workerId, curDrawFE);
765
766 if (!IsBEThread)
767 {
768 curDrawBE = curDrawFE;
769 }
770 }
771 }
772
773 return 0;
774 }
775 template<> DWORD workerThreadMain<false, false>(LPVOID) = delete;
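// A worker must service the FE, the BE, or both; the <false, false> instantiation above
// is meaningless and is therefore explicitly deleted.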
776
777 template <bool IsFEThread, bool IsBEThread>
778 DWORD workerThreadInit(LPVOID pData)
779 {
780 #if defined(_WIN32)
781 __try
782 #endif // _WIN32
783 {
784 return workerThreadMain<IsFEThread, IsBEThread>(pData);
785 }
786
787 #if defined(_WIN32)
788 __except(EXCEPTION_CONTINUE_SEARCH)
789 {
790 }
791
792 #endif // _WIN32
793
794 return 1;
795 }
796 template<> DWORD workerThreadInit<false, false>(LPVOID pData) = delete;
797
798 void CreateThreadPool(SWR_CONTEXT *pContext, THREAD_POOL *pPool)
799 {
800 bindThread(pContext, 0);
801
802 CPUNumaNodes nodes;
803 uint32_t numThreadsPerProcGroup = 0;
804 CalculateProcessorTopology(nodes, numThreadsPerProcGroup);
805
806 uint32_t numHWNodes = (uint32_t)nodes.size();
807 uint32_t numHWCoresPerNode = (uint32_t)nodes[0].cores.size();
808 uint32_t numHWHyperThreads = (uint32_t)nodes[0].cores[0].threadIds.size();
809
810 // Calculate num HW threads. Due to asymmetric topologies, this is not
811 // a trivial multiplication.
812 uint32_t numHWThreads = 0;
813 for (auto& node : nodes)
814 {
815 for (auto& core : node.cores)
816 {
817 numHWThreads += (uint32_t)core.threadIds.size();
818 }
819 }
820
821 uint32_t numNodes = numHWNodes;
822 uint32_t numCoresPerNode = numHWCoresPerNode;
823 uint32_t numHyperThreads = numHWHyperThreads;
824
825 if (pContext->threadInfo.MAX_NUMA_NODES)
826 {
827 numNodes = std::min(numNodes, pContext->threadInfo.MAX_NUMA_NODES);
828 }
829
830 if (pContext->threadInfo.MAX_CORES_PER_NUMA_NODE)
831 {
832 numCoresPerNode = std::min(numCoresPerNode, pContext->threadInfo.MAX_CORES_PER_NUMA_NODE);
833 }
834
835 if (pContext->threadInfo.MAX_THREADS_PER_CORE)
836 {
837 numHyperThreads = std::min(numHyperThreads, pContext->threadInfo.MAX_THREADS_PER_CORE);
838 }
839
840 #if defined(_WIN32) && !defined(_WIN64)
841 if (!pContext->threadInfo.MAX_WORKER_THREADS)
842 {
843 // Limit 32-bit Windows to bindable HW threads only
844 if ((numCoresPerNode * numHWHyperThreads) > 32)
845 {
846 numCoresPerNode = 32 / numHWHyperThreads;
847 }
848 }
849 #endif
850
851 // Calculate numThreads
852 uint32_t numThreads = numNodes * numCoresPerNode * numHyperThreads;
853 numThreads = std::min(numThreads, numHWThreads);
854
855 if (pContext->threadInfo.MAX_WORKER_THREADS)
856 {
857 uint32_t maxHWThreads = numHWNodes * numHWCoresPerNode * numHWHyperThreads;
858 numThreads = std::min(pContext->threadInfo.MAX_WORKER_THREADS, maxHWThreads);
859 }
860
861 if (numThreads > KNOB_MAX_NUM_THREADS)
862 {
863 printf("WARNING: system thread count %u exceeds max %u, "
864 "performance will be degraded\n", numThreads, KNOB_MAX_NUM_THREADS);
865 numThreads = KNOB_MAX_NUM_THREADS; // clamp so we don't index past the pool's fixed-size per-thread arrays
866 }
867
868 uint32_t numAPIReservedThreads = 1;
869
870
871 if (numThreads == 1)
872 {
873 // If only 1 worker thread, try to move it to an available
874 // HW thread. If that fails, use the API thread.
875 if (numCoresPerNode < numHWCoresPerNode)
876 {
877 numCoresPerNode++;
878 }
879 else if (numHyperThreads < numHWHyperThreads)
880 {
881 numHyperThreads++;
882 }
883 else if (numNodes < numHWNodes)
884 {
885 numNodes++;
886 }
887 else
888 {
889 pPool->numThreads = 0;
890 SET_KNOB(SINGLE_THREADED, true);
891 return;
892 }
893 }
894 else
895 {
896 // Save HW threads for the API if we can
897 if (numThreads > numAPIReservedThreads)
898 {
899 numThreads -= numAPIReservedThreads;
900 }
901 else
902 {
903 numAPIReservedThreads = 0;
904 }
905 }
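// Illustrative sizing example (assuming no knob overrides): on a hypothetical 2-node,
// 8-cores-per-node, 2-threads-per-core machine, numThreads starts at 2 * 8 * 2 = 32,
// one HW thread is reserved for the API thread, and 31 worker threads are created.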
906
907 pPool->numThreads = numThreads;
908 pContext->NumWorkerThreads = pPool->numThreads;
909
910 pPool->inThreadShutdown = false;
911 pPool->pThreadData = (THREAD_DATA *)malloc(pPool->numThreads * sizeof(THREAD_DATA));
912 pPool->numaMask = 0;
913
914 if (pContext->threadInfo.MAX_WORKER_THREADS)
915 {
916 bool bForceBindProcGroup = (numThreads > numThreadsPerProcGroup);
917 uint32_t numProcGroups = (numThreads + numThreadsPerProcGroup - 1) / numThreadsPerProcGroup;
918 // When MAX_WORKER_THREADS is set we don't bother to bind to specific HW threads,
919 // but Windows will still require binding to specific process groups.
920 for (uint32_t workerId = 0; workerId < numThreads; ++workerId)
921 {
922 pPool->pThreadData[workerId].workerId = workerId;
923 pPool->pThreadData[workerId].procGroupId = workerId % numProcGroups;
924 pPool->pThreadData[workerId].threadId = 0;
925 pPool->pThreadData[workerId].numaId = 0;
926 pPool->pThreadData[workerId].coreId = 0;
927 pPool->pThreadData[workerId].htId = 0;
928 pPool->pThreadData[workerId].pContext = pContext;
929 pPool->pThreadData[workerId].forceBindProcGroup = bForceBindProcGroup;
930 pPool->threads[workerId] = new std::thread(workerThreadInit<true, true>, &pPool->pThreadData[workerId]);
931
932 pContext->NumBEThreads++;
933 pContext->NumFEThreads++;
934 }
935 }
936 else
937 {
938 pPool->numaMask = numNodes - 1; // Only works for 2**n numa nodes (1, 2, 4, etc.)
939
940 uint32_t workerId = 0;
941 for (uint32_t n = 0; n < numNodes; ++n)
942 {
943 auto& node = nodes[n];
944 uint32_t numCores = numCoresPerNode;
945 for (uint32_t c = 0; c < numCores; ++c)
946 {
947 if (c >= node.cores.size())
948 {
949 break;
950 }
951
952 auto& core = node.cores[c];
953 for (uint32_t t = 0; t < numHyperThreads; ++t)
954 {
955 if (t >= core.threadIds.size())
956 {
957 break;
958 }
959
960 if (numAPIReservedThreads)
961 {
962 --numAPIReservedThreads;
963 continue;
964 }
965
966 SWR_ASSERT(workerId < numThreads);
967
968 pPool->pThreadData[workerId].workerId = workerId;
969 pPool->pThreadData[workerId].procGroupId = core.procGroup;
970 pPool->pThreadData[workerId].threadId = core.threadIds[t];
971 pPool->pThreadData[workerId].numaId = n;
972 pPool->pThreadData[workerId].coreId = c;
973 pPool->pThreadData[workerId].htId = t;
974 pPool->pThreadData[workerId].pContext = pContext;
975
976 pPool->threads[workerId] = new std::thread(workerThreadInit<true, true>, &pPool->pThreadData[workerId]);
977 pContext->NumBEThreads++;
978 pContext->NumFEThreads++;
979
980 ++workerId;
981 }
982 }
983 }
984 }
985 }
986
987 void DestroyThreadPool(SWR_CONTEXT *pContext, THREAD_POOL *pPool)
988 {
989 if (!pContext->threadInfo.SINGLE_THREADED)
990 {
991 // Inform threads to finish up
992 std::unique_lock<std::mutex> lock(pContext->WaitLock);
993 pPool->inThreadShutdown = true;
994 _mm_mfence();
995 pContext->FifosNotEmpty.notify_all();
996 lock.unlock();
997
998 // Wait for threads to finish and destroy them
999 for (uint32_t t = 0; t < pPool->numThreads; ++t)
1000 {
1001 pPool->threads[t]->join();
1002 delete(pPool->threads[t]);
1003 }
1004
1005 // Clean up data used by threads
1006 free(pPool->pThreadData);
1007 }
1008 }