2 * Copyright (c) 2014 The University of Wisconsin
4 * Copyright (c) 2006 INRIA (Institut National de Recherche en
5 * Informatique et en Automatique / French National Research Institute
6 * for Computer Science and Applied Mathematics)
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
12 * met: redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer;
14 * redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution;
17 * neither the name of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Implementation of a TAGE branch predictor
38 #include "cpu/pred/tage_base.hh"
40 #include "base/intmath.hh"
41 #include "base/logging.hh"
42 #include "debug/Fetch.hh"
43 #include "debug/Tage.hh"
// Constructor: copies table geometry and update-policy parameters out of
// the Params object into const members used by all prediction routines.
// NOTE(review): this chunk is missing several original lines — the
// base-class initializer list entry, the minHist/maxHist initializers,
// and the body braces are not visible here; confirm against the full file.
TAGEBase::TAGEBase(const TAGEBaseParams *p)
    logRatioBiModalHystEntries(p->logRatioBiModalHystEntries),
    nHistoryTables(p->nHistoryTables),
    tagTableCounterBits(p->tagTableCounterBits),
    tagTableUBits(p->tagTableUBits),
    histBufferSize(p->histBufferSize),
    pathHistBits(p->pathHistBits),
    tagTableTagWidths(p->tagTableTagWidths),
    logTagTableSizes(p->logTagTableSizes),
    threadHistory(p->numThreads),  // one history context per hardware thread
    logUResetPeriod(p->logUResetPeriod),
    initialTCounterValue(p->initialTCounterValue),
    numUseAltOnNa(p->numUseAltOnNa),
    useAltOnNaBits(p->useAltOnNaBits),
    maxNumAlloc(p->maxNumAlloc),
    speculativeHistUpdate(p->speculativeHistUpdate),
    instShiftAmt(p->instShiftAmt),
    stats(this, nHistoryTables)
    // Set all the tables to enabled by default; index 0 is the bimodal
    // table, indices 1..nHistoryTables are the tagged tables.
    noSkip.resize(nHistoryTables + 1, true);
// Factory for the per-branch bookkeeping record; derived predictors may
// override to return an extended BranchInfo. Caller owns the returned object.
// NOTE(review): the return-type line and closing brace are elided in this chunk.
TAGEBase::makeBranchInfo() {
    return new BranchInfo(*this);
// Initialization body: validates the configuration, allocates the global
// history buffers, the per-table folded histories, the bimodal tables and
// the tagged-table index/tag scratch arrays.
// NOTE(review): the enclosing function signature (presumably init()) and
// several lines are not visible in this chunk — verify against the full file.

// Current method for periodically resetting the u counter bits only
// works for 1 or 2 bits
// Also make sure that it is not 0
assert(tagTableUBits <= 2 && (tagTableUBits > 0));

// we use int type for the path history, so it cannot be more than
// the number of bits in an int
assert(pathHistBits <= (sizeof(int)*8));

// initialize the counter to half of the period
assert(logUResetPeriod != 0);
tCounter = initialTCounterValue;

// The circular buffer must hold at least two full windows of history so
// the roll-over copy in updateGHist never overlaps live entries.
assert(histBufferSize > maxHist * 2);

useAltPredForNewlyAllocated.resize(numUseAltOnNa, 0);

for (auto& history : threadHistory) {
    history.pathHist = 0;
    history.globalHistory = new uint8_t[histBufferSize];
    history.gHist = history.globalHistory;
    memset(history.gHist, 0, histBufferSize);

histLengths = new int [nHistoryTables+1];

calculateParameters();

assert(tagTableTagWidths.size() == (nHistoryTables+1));
assert(logTagTableSizes.size() == (nHistoryTables+1));

// First entry is for the Bimodal table and it is untagged in this
// implementation, hence tag width 0.
assert(tagTableTagWidths[0] == 0);

for (auto& history : threadHistory) {
    history.computeIndices = new FoldedHistory[nHistoryTables+1];
    history.computeTags[0] = new FoldedHistory[nHistoryTables+1];
    history.computeTags[1] = new FoldedHistory[nHistoryTables+1];

    initFoldedHistories(history);

const uint64_t bimodalTableSize = ULL(1) << logTagTableSizes[0];
btablePrediction.resize(bimodalTableSize, false);
// One hysteresis bit is shared among 2^logRatioBiModalHystEntries
// prediction bits. NOTE(review): second resize argument elided here.
btableHysteresis.resize(bimodalTableSize >> logRatioBiModalHystEntries,

gtable = new TageEntry*[nHistoryTables + 1];

tableIndices = new int [nHistoryTables+1];
tableTags = new int [nHistoryTables+1];
// Initialize the folded (compressed) histories for one thread: for each
// tagged table, the index hash and two tag hashes (tag width and width-1)
// are folded from the same geometric history length.
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::initFoldedHistories(ThreadHistory & history)
for (int i = 1; i <= nHistoryTables; i++) {
    history.computeIndices[i].init(
        histLengths[i], (logTagTableSizes[i]));
    history.computeTags[0][i].init(
        history.computeIndices[i].origLength, tagTableTagWidths[i]);
    // Second tag hash uses one fewer bit so the two fold differently.
    history.computeTags[1][i].init(
        history.computeIndices[i].origLength, tagTableTagWidths[i]-1);
    DPRINTF(Tage, "HistLength:%d, TTSize:%d, TTTWidth:%d\n",
            histLengths[i], logTagTableSizes[i], tagTableTagWidths[i]);
// Allocate the tagged prediction tables (indices 1..nHistoryTables); the
// bimodal table (index 0) is held separately in btablePrediction/Hysteresis.
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::buildTageTables()
for (int i = 1; i <= nHistoryTables; i++) {
    gtable[i] = new TageEntry[1<<(logTagTableSizes[i])];
// Compute the geometric series of history lengths between minHist and
// maxHist (TAGE's defining property): table i gets
// minHist * (maxHist/minHist)^((i-1)/(nHistoryTables-1)).
// NOTE(review): the rounding/closing tail of the expression and braces are
// elided in this chunk.
TAGEBase::calculateParameters()
histLengths[1] = minHist;
histLengths[nHistoryTables] = maxHist;
for (int i = 2; i <= nHistoryTables; i++) {
    histLengths[i] = (int) (((double) minHist *
        pow ((double) (maxHist) / (double) minHist,
            (double) (i - 1) / (double) ((nHistoryTables - 1))))
// On a BTB miss, restore the folded histories from the checkpoint saved in
// the BranchInfo and recompute them — only needed when histories are
// updated speculatively at predict time.
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::btbUpdate(ThreadID tid, Addr branch_pc, BranchInfo* &bi)
if (speculativeHistUpdate) {
    ThreadHistory& tHist = threadHistory[tid];
    DPRINTF(Tage, "BTB miss resets prediction: %lx\n", branch_pc);
    // gHist must still point at the current head of the circular buffer.
    assert(tHist.gHist == &tHist.globalHistory[tHist.ptGhist]);
    for (int i = 1; i <= nHistoryTables; i++) {
        // Restore the checkpointed folded-history values...
        tHist.computeIndices[i].comp = bi->ci[i];
        tHist.computeTags[0][i].comp = bi->ct0[i];
        tHist.computeTags[1][i].comp = bi->ct1[i];
        // ...then fold in the current head bit again.
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
// Index into the (untagged) bimodal table: shifted PC masked to the
// table size. NOTE(review): return-type line and braces are elided.
TAGEBase::bindex(Addr pc_in) const
return ((pc_in >> instShiftAmt) & ((ULL(1) << (logTagTableSizes[0])) - 1));
// Mix the path history A into a bank-dependent hash: A is split at the
// table-index width into A1/A2, A2 is rotated by `bank`, and the result is
// folded back into the index width.
// NOTE(review): the return-type line, the A1/A2 declarations, the step that
// combines A1 and A2 back into A, and the return statement are elided in
// this chunk — verify against the full file.
TAGEBase::F(int A, int size, int bank) const
// Keep only `size` low bits of the path history.
A = A & ((ULL(1) << size) - 1);
A1 = (A & ((ULL(1) << logTagTableSizes[bank]) - 1));
A2 = (A >> logTagTableSizes[bank]);
// Rotate A2 left by `bank` within the index width.
A2 = ((A2 << bank) & ((ULL(1) << logTagTableSizes[bank]) - 1))
    + (A2 >> (logTagTableSizes[bank] - bank));
// Rotate the combined value by `bank` as well.
A = ((A << bank) & ((ULL(1) << logTagTableSizes[bank]) - 1))
    + (A >> (logTagTableSizes[bank] - bank));
// gindex computes a full hash of pc, ghist and pathHist for one tagged bank.
// NOTE(review): the return type, braces, the tail of the hlen ternary
// (presumably histLengths[bank]) and the declaration of `index` are elided
// in this chunk.
TAGEBase::gindex(ThreadID tid, Addr pc, int bank) const
// Cap the amount of path history used at pathHistBits.
int hlen = (histLengths[bank] > pathHistBits) ? pathHistBits :
const unsigned int shiftedPc = pc >> instShiftAmt;
    (shiftedPc >> ((int) abs(logTagTableSizes[bank] - bank) + 1)) ^
    threadHistory[tid].computeIndices[bank].comp ^
    F(threadHistory[tid].pathHist, hlen, bank);
// Mask down to the bank's index width.
return (index & ((ULL(1) << (logTagTableSizes[bank])) - 1));
// Compute the partial tag for one bank: shifted PC xor-ed with the two
// folded tag histories (the second shifted by one so they fold differently).
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::gtag(ThreadID tid, Addr pc, int bank) const
int tag = (pc >> instShiftAmt) ^
    threadHistory[tid].computeTags[0][bank].comp ^
    (threadHistory[tid].computeTags[1][bank].comp << 1);
return (tag & ((ULL(1) << tagTableTagWidths[bank]) - 1));
// Up-down saturating signed counter in the range
// [-(2^(nbits-1)) .. 2^(nbits-1)-1].
// NOTE(review): the template<typename T> / return-type lines, the
// taken/not-taken branching and the actual ctr++/ctr-- statements are
// elided in this chunk — only the saturation guards are visible.
TAGEBase::ctrUpdate(T & ctr, bool taken, int nbits)
assert(nbits <= sizeof(T) << 3);
// Saturate at the positive bound...
if (ctr < ((1 << (nbits - 1)) - 1))
// ...and at the negative bound.
if (ctr > -(1 << (nbits - 1)))

// int8_t and int versions of this function may be needed
template void TAGEBase::ctrUpdate(int8_t & ctr, bool taken, int nbits);
template void TAGEBase::ctrUpdate(int & ctr, bool taken, int nbits);
// Up-down unsigned saturating counter in [0 .. 2^nbits - 1]; used for the
// usefulness (u) bits.
// NOTE(review): the return type, braces, the increment and the decrement
// branch are elided in this chunk.
TAGEBase::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
assert(nbits <= sizeof(uint8_t) << 3);
if (ctr < ((1 << nbits) - 1))
// Bimodal prediction: direct lookup of the prediction bit at the index
// computed earlier into bi->bimodalIndex.
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::getBimodePred(Addr pc, BranchInfo* bi) const
return btablePrediction[bi->bimodalIndex];
// Update the bimodal predictor: a hysteresis bit is shared among N prediction
// bits (N = 2 ^ logRatioBiModalHystEntries). The prediction and hysteresis
// bits together form a 2-bit counter `inter`.
// NOTE(review): the return type, braces, and the taken branch of the
// counter update (the code before `} else if (inter > 0)`) are elided in
// this chunk.
TAGEBase::baseUpdate(Addr pc, bool taken, BranchInfo* bi)
// Reassemble the 2-bit counter from the split prediction/hysteresis bits.
int inter = (btablePrediction[bi->bimodalIndex] << 1)
    + btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries];
} else if (inter > 0) {
// Split the updated counter back into its two shared bits.
const bool pred = inter >> 1;
const bool hyst = inter & 1;
btablePrediction[bi->bimodalIndex] = pred;
btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries] = hyst;
DPRINTF(Tage, "Updating branch %lx, pred:%d, hyst:%d\n", pc, pred, hyst);
// shifting the global history: we manage the history in a big table in order
// to reduce simulation time. The head pointer moves backward; when it hits
// zero the recent window is copied to the end of the buffer and the pointer
// rolls over.
// NOTE(review): the return type, braces, the `pt == 0` roll-over guard and
// the pointer-decrement statements after the roll-over are elided in this
// chunk.
TAGEBase::updateGHist(uint8_t * &h, bool dir, uint8_t * tab, int &pt)
DPRINTF(Tage, "Rolling over the histories\n");
// Copy beginning of globalHistoryBuffer to end, such that
// the last maxHist outcomes are still reachable
// through pt[0 .. maxHist - 1].
for (int i = 0; i < maxHist; i++)
    tab[histBufferSize - maxHist + i] = tab[i];
pt = histBufferSize - maxHist;
// Record the newest outcome at the head of the history.
h[0] = (dir) ? 1 : 0;
// Compute the table index and partial tag for every tagged bank, caching
// them both in the scratch arrays and in the BranchInfo for later update.
// NOTE(review): the return type, the trailing `BranchInfo* bi` parameter
// line, and braces are elided in this chunk.
TAGEBase::calculateIndicesAndTags(ThreadID tid, Addr branch_pc,
// computes the table addresses and the partial tags
for (int i = 1; i <= nHistoryTables; i++) {
    tableIndices[i] = gindex(tid, branch_pc, i);
    bi->tableIndices[i] = tableIndices[i];
    tableTags[i] = gtag(tid, branch_pc, i);
    bi->tableTags[i] = tableTags[i];
// Select which useAltPredForNewlyAllocated counter to use for this branch.
// NOTE(review): the return type, braces and the return statement
// (presumably index 0) are elided in this chunk.
TAGEBase::getUseAltIdx(BranchInfo* bi, Addr branch_pc)
// There is only 1 counter on the base TAGE implementation
// Main TAGE lookup: find the longest- and second-longest-matching tagged
// banks, derive the provider and alternate predictions, and choose between
// them based on the "newly allocated" heuristic. Records everything needed
// for the later update in bi.
// NOTE(review): many lines are elided in this chunk — the return type, the
// `Addr pc` derivation, the `if (cond_branch)` guard, the heads of the
// tag-compare conditionals (the `noSkip[i] &&` part), the `else` branches
// of the provider selection, the pseudoNewAlloc assignment target, the
// TAGE_ALT_MATCH/BIMODAL_ALT_MATCH fallback, closing braces, and the final
// return. Verify against the full file before relying on this structure.
TAGEBase::tagePredict(ThreadID tid, Addr branch_pc,
    bool cond_branch, BranchInfo* bi)
bool pred_taken = true;

calculateIndicesAndTags(tid, pc, bi);

bi->bimodalIndex = bindex(pc);

//Look for the bank with longest matching history
for (int i = nHistoryTables; i > 0; i--) {
    gtable[i][tableIndices[i]].tag == tableTags[i]) {
    bi->hitBankIndex = tableIndices[bi->hitBank];

//Look for the alternate bank
for (int i = bi->hitBank - 1; i > 0; i--) {
    gtable[i][tableIndices[i]].tag == tableTags[i]) {
    bi->altBankIndex = tableIndices[bi->altBank];

//computes the prediction and the alternate prediction
if (bi->hitBank > 0) {
    if (bi->altBank > 0) {
        // Alternate prediction comes from the second-longest match...
        gtable[bi->altBank][tableIndices[bi->altBank]].ctr >= 0;
    // ...or from the bimodal table when no alternate bank hit.
    bi->altTaken = getBimodePred(pc, bi);

    // Provider prediction: sign of the hit bank's counter.
    bi->longestMatchPred =
        gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr >= 0;

    // A weak counter (|2*ctr+1| <= 1) marks a pseudo-newly-allocated entry.
    abs(2 * gtable[bi->hitBank][bi->hitBankIndex].ctr + 1) <= 1;

    //if the entry is recognized as a newly allocated entry and
    //useAltPredForNewlyAllocated is positive use the alternate
    //prediction
    if ((useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)] < 0)
        || ! bi->pseudoNewAlloc) {
        bi->tagePred = bi->longestMatchPred;
        bi->provider = TAGE_LONGEST_MATCH;
        bi->tagePred = bi->altTaken;
        bi->provider = bi->altBank ? TAGE_ALT_MATCH
    // No tagged hit at all: fall back to the bimodal prediction.
    bi->altTaken = getBimodePred(pc, bi);
    bi->tagePred = bi->altTaken;
    bi->longestMatchPred = bi->altTaken;
    bi->provider = BIMODAL_ONLY;
//end TAGE prediction

pred_taken = (bi->tagePred);
DPRINTF(Tage, "Predict for %lx: taken?:%d, tagePred:%d, altPred:%d\n",
        branch_pc, pred_taken, bi->tagePred, bi->altTaken);

bi->branchPC = branch_pc;
bi->condBranch = cond_branch;
// Hook for derived predictors to veto or force allocation; intentionally a
// no-op here. NOTE(review): return-type line and braces are elided.
TAGEBase::adjustAlloc(bool & alloc, bool taken, bool pred_taken)
// Nothing for this base class implementation
// On a misprediction, try to allocate up to maxNumAlloc new entries in
// banks with longer history than the provider, preferring entries whose
// usefulness counter is zero; also advances the periodic u-reset counter.
// NOTE(review): heavily elided in this chunk — the return type, the `nrand`
// parameter tail, the `if (alloc)` guard, the declaration/initialization of
// `min`, the random bank-skip computation that uses the visible mask
// expression, the "no entry available" condition guarding the forced
// u-clear, the u-decrement penalty branch, the numAllocated increment,
// break statements, closing braces, and the tCounter update/handleUReset
// call are all missing. Verify against the full file.
TAGEBase::handleAllocAndUReset(bool alloc, bool taken, BranchInfo* bi,
// is there some "unuseful" entry to allocate
for (int i = nHistoryTables; i > bi->hitBank; i--) {
    if (gtable[i][bi->tableIndices[i]].u < min) {
        min = gtable[i][bi->tableIndices[i]].u;

// we allocate an entry with a longer history
// to avoid ping-pong, we do not choose systematically the next
// entry, but among the 3 next entries
((ULL(1) << (nHistoryTables - bi->hitBank - 1)) - 1);
int X = bi->hitBank + 1;

// No entry available, forces one to be available
gtable[X][bi->tableIndices[X]].u = 0;

unsigned numAllocated = 0;
for (int i = X; i <= nHistoryTables; i++) {
    if ((gtable[i][bi->tableIndices[i]].u == 0)) {
        // Claim the entry: install the tag and a weak counter biased
        // toward the actual outcome.
        gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
        gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
        if (numAllocated == maxNumAlloc) {
// Periodically age the usefulness bits of every tagged entry so stale
// entries become reclaimable. NOTE(review): return-type line and braces are
// elided in this chunk.
TAGEBase::handleUReset()
//periodic reset of u: reset is not complete but bit by bit
if ((tCounter & ((ULL(1) << logUResetPeriod) - 1)) == 0) {
    // reset least significant bit
    // most significant bit becomes least significant bit
    for (int i = 1; i <= nHistoryTables; i++) {
        for (int j = 0; j < (ULL(1) << logTagTableSizes[i]); j++) {
            resetUctr(gtable[i][j].u);
// Age a single usefulness counter during the periodic reset.
// NOTE(review): the return type, braces, and the body (presumably a
// right shift of u) are elided in this chunk.
TAGEBase::resetUctr(uint8_t & u)
// Commit-time update for a conditional branch: decide whether to allocate
// new entries, train the useAltPredForNewlyAllocated chooser when the
// provider entry was pseudo-newly allocated, then run allocation/u-reset
// and the table counter updates.
// NOTE(review): elided in this chunk — the return type, braces, the
// statements inside `if (bi->longestMatchPred == taken)` (presumably
// clearing alloc), the ctrUpdate call head above the
// useAltPredForNewlyAllocated arguments, and closing braces. Verify
// against the full file.
TAGEBase::condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken,
    BranchInfo* bi, int nrand, Addr corrTarget, bool pred,
    bool preAdjustAlloc)
// try to allocate a new entries only if prediction was wrong
bool alloc = (bi->tagePred != taken) && (bi->hitBank < nHistoryTables);

if (preAdjustAlloc) {
    adjustAlloc(alloc, taken, pred);

if (bi->hitBank > 0) {
    // Manage the selection between longest matching and alternate
    // matching for "pseudo"-newly allocated longest matching entry
    bool PseudoNewAlloc = bi->pseudoNewAlloc;
    // an entry is considered as newly allocated if its prediction
    // counter is weak
    if (PseudoNewAlloc) {
        if (bi->longestMatchPred == taken) {
        // if it was delivering the correct prediction, no need to
        // allocate new entry even if the overall prediction was false
        if (bi->longestMatchPred != bi->altTaken) {
            // Train the chooser toward whichever of provider/alternate
            // was actually correct.
            useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)],
            bi->altTaken == taken, useAltOnNaBits);

if (!preAdjustAlloc) {
    adjustAlloc(alloc, taken, pred);

handleAllocAndUReset(alloc, taken, bi, nrand);

handleTAGEUpdate(branch_pc, taken, bi);
// Train the provider entry's counter; when the provider is not yet proven
// useful (u == 0) also train the alternate (tagged or bimodal); and update
// the provider's usefulness bit when provider and alternate disagreed.
// NOTE(review): the return type, braces, the tail of the second DPRINTF
// argument list, the else-branch structure around the final baseUpdate
// (taken when no bank hit), and closing braces are elided in this chunk.
TAGEBase::handleTAGEUpdate(Addr branch_pc, bool taken, BranchInfo* bi)
if (bi->hitBank > 0) {
    DPRINTF(Tage, "Updating tag table entry (%d,%d) for branch %lx\n",
            bi->hitBank, bi->hitBankIndex, branch_pc);
    ctrUpdate(gtable[bi->hitBank][bi->hitBankIndex].ctr, taken,
              tagTableCounterBits);
    // if the provider entry is not certified to be useful also update
    // the alternate prediction
    if (gtable[bi->hitBank][bi->hitBankIndex].u == 0) {
        if (bi->altBank > 0) {
            ctrUpdate(gtable[bi->altBank][bi->altBankIndex].ctr, taken,
                      tagTableCounterBits);
            DPRINTF(Tage, "Updating tag table entry (%d,%d) for"
                    " branch %lx\n", bi->hitBank, bi->hitBankIndex,
        if (bi->altBank == 0) {
            baseUpdate(branch_pc, taken, bi);

    // update the u counter
    if (bi->tagePred != bi->altTaken) {
        unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
                          bi->tagePred == taken, tagTableUBits);
    // No tagged hit: only the bimodal table is trained.
    baseUpdate(branch_pc, taken, bi);
// Shift the new outcome into the global and path histories, checkpoint the
// folded-history state into bi (for squash recovery), and advance every
// folded history by one bit.
// NOTE(review): elided in this chunk — the return type, braces, the early
// return inside the speculative-mismatch guard, the `if (speculative)`
// guard around the bi checkpointing, whatever sits between the for-loop
// head and the ci/ct0/ct1 saves, the ptGhist argument of the final DPRINTF,
// and closing braces. Verify against the full file.
TAGEBase::updateHistories(ThreadID tid, Addr branch_pc, bool taken,
    BranchInfo* bi, bool speculative,
    const StaticInstPtr &inst, Addr target)
// Only act in the mode (speculative vs. commit) this predictor is
// configured for.
if (speculative != speculativeHistUpdate) {

ThreadHistory& tHist = threadHistory[tid];
bool pathbit = ((branch_pc >> instShiftAmt) & 1);
//on a squash, return pointers to this and recompute indices.
//update user history
updateGHist(tHist.gHist, taken, tHist.globalHistory, tHist.ptGhist);
tHist.pathHist = (tHist.pathHist << 1) + pathbit;
tHist.pathHist = (tHist.pathHist & ((ULL(1) << pathHistBits) - 1));

// Checkpoint for squash recovery.
bi->ptGhist = tHist.ptGhist;
bi->pathHist = tHist.pathHist;

//prepare next index and tag computations for user branchs
for (int i = 1; i <= nHistoryTables; i++)
    bi->ci[i] = tHist.computeIndices[i].comp;
    bi->ct0[i] = tHist.computeTags[0][i].comp;
    bi->ct1[i] = tHist.computeTags[1][i].comp;
    tHist.computeIndices[i].update(tHist.gHist);
    tHist.computeTags[0][i].update(tHist.gHist);
    tHist.computeTags[1][i].update(tHist.gHist);

DPRINTF(Tage, "Updating global histories with branch:%lx; taken?:%d, "
        "path Hist: %x; pointer:%d\n", branch_pc, taken, tHist.pathHist,

// gHist must still track the head of the circular buffer.
assert(threadHistory[tid].gHist ==
       &threadHistory[tid].globalHistory[threadHistory[tid].ptGhist]);
// Roll the speculative history state back to the checkpoint saved in bi,
// then re-apply the (now known) outcome of the squashed branch and
// recompute the folded histories.
// NOTE(review): the return type, the trailing parameter line of the
// signature, the early return in the non-speculative guard, and closing
// braces are elided in this chunk.
TAGEBase::squash(ThreadID tid, bool taken, TAGEBase::BranchInfo *bi,
if (!speculativeHistUpdate) {
    /* If there are no speculative updates, no actions are needed */

ThreadHistory& tHist = threadHistory[tid];
DPRINTF(Tage, "Restoring branch info: %lx; taken? %d; PathHistory:%x, "
        "pointer:%d\n", bi->branchPC,taken, bi->pathHist, bi->ptGhist);
tHist.pathHist = bi->pathHist;
tHist.ptGhist = bi->ptGhist;
tHist.gHist = &(tHist.globalHistory[tHist.ptGhist]);
// Overwrite the head bit with the resolved direction.
tHist.gHist[0] = (taken ? 1 : 0);
for (int i = 1; i <= nHistoryTables; i++) {
    // Restore the checkpointed folded histories...
    tHist.computeIndices[i].comp = bi->ci[i];
    tHist.computeTags[0][i].comp = bi->ct0[i];
    tHist.computeTags[1][i].comp = bi->ct1[i];
    // ...and fold in the corrected head bit.
    tHist.computeIndices[i].update(tHist.gHist);
    tHist.computeTags[0][i].update(tHist.gHist);
    tHist.computeTags[1][i].update(tHist.gHist);
// Hook for derived classes to refine the alternate prediction; no-op here.
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::extraAltCalc(BranchInfo* bi)
// do nothing. This is only used in some derived classes
// Bump the per-provider correct/wrong counters, the "would have hit"
// counters for the non-chosen prediction, and the per-bank provider
// histograms.
// NOTE(review): the return type, braces, the `else` separating the
// correct/wrong switches, several `break` statements, the
// BIMODAL_ALT_MATCH/TAGE_ALT_MATCH wrong-case fallthrough structure, the
// guards around the histogram switches, and closing braces are elided in
// this chunk.
TAGEBase::updateStats(bool taken, BranchInfo* bi)
if (taken == bi->tagePred) {
    // correct prediction
    switch (bi->provider) {
    case BIMODAL_ONLY: stats.bimodalProviderCorrect++; break;
    case TAGE_LONGEST_MATCH: stats.longestMatchProviderCorrect++; break;
    case BIMODAL_ALT_MATCH:
        stats.bimodalAltMatchProviderCorrect++;
    case TAGE_ALT_MATCH: stats.altMatchProviderCorrect++; break;
    // wrong prediction
    switch (bi->provider) {
    case BIMODAL_ONLY: stats.bimodalProviderWrong++; break;
    case TAGE_LONGEST_MATCH:
        stats.longestMatchProviderWrong++;
        if (bi->altTaken == taken) {
            stats.altMatchProviderWouldHaveHit++;
    case BIMODAL_ALT_MATCH:
        stats.bimodalAltMatchProviderWrong++;
        stats.altMatchProviderWrong++;

switch (bi->provider) {
case BIMODAL_ALT_MATCH:
    if (bi->longestMatchPred == taken) {
        stats.longestMatchProviderWouldHaveHit++;

switch (bi->provider) {
case TAGE_LONGEST_MATCH:
    stats.longestMatchProvider[bi->hitBank]++;
    stats.altMatchProvider[bi->altBank]++;
// Pack the 32 most recent global-history bits (starting at the branch's
// checkpointed ptGhist) into an integer.
// NOTE(review): the return type, braces, the declaration/initialization of
// `val`, and the return statement are elided in this chunk.
TAGEBase::getGHR(ThreadID tid, BranchInfo *bi) const
for (unsigned i = 0; i < 32; i++) {
    // Make sure we don't go out of bounds
    int gh_offset = bi->ptGhist + i;
    assert(&(threadHistory[tid].globalHistory[gh_offset]) <
           threadHistory[tid].globalHistory + histBufferSize);

    val |= ((threadHistory[tid].globalHistory[gh_offset] & 0x1) << i);
// Register all TAGE statistics with the stats framework; the two vector
// stats get one bucket per table (index 0 = bimodal).
// NOTE(review): the body braces and parts of two ADD_STAT description
// strings (the closing "... is correct/wrong" fragments) are elided in
// this chunk.
TAGEBase::TAGEBaseStats::TAGEBaseStats(
    Stats::Group *parent, unsigned nHistoryTables)
    : Stats::Group(parent),
    ADD_STAT(longestMatchProviderCorrect, "Number of times TAGE Longest"
             " Match is the provider and the prediction is correct"),
    ADD_STAT(altMatchProviderCorrect, "Number of times TAGE Alt Match"
             " is the provider and the prediction is correct"),
    ADD_STAT(bimodalAltMatchProviderCorrect, "Number of times TAGE Alt"
             " Match is the bimodal and it is the provider and the prediction"
    ADD_STAT(bimodalProviderCorrect, "Number of times there are no"
             " hits on the TAGE tables and the bimodal prediction is correct"),
    ADD_STAT(longestMatchProviderWrong, "Number of times TAGE Longest"
             " Match is the provider and the prediction is wrong"),
    ADD_STAT(altMatchProviderWrong, "Number of times TAGE Alt Match is"
             " the provider and the prediction is wrong"),
    ADD_STAT(bimodalAltMatchProviderWrong, "Number of times TAGE Alt Match"
             " is the bimodal and it is the provider and the prediction is"
    ADD_STAT(bimodalProviderWrong, "Number of times there are no hits"
             " on the TAGE tables and the bimodal prediction is wrong"),
    ADD_STAT(altMatchProviderWouldHaveHit, "Number of times TAGE"
             " Longest Match is the provider, the prediction is wrong and"
             " Alt Match prediction was correct"),
    ADD_STAT(longestMatchProviderWouldHaveHit, "Number of times"
             " TAGE Alt Match is the provider, the prediction is wrong and"
             " Longest Match prediction was correct"),
    ADD_STAT(longestMatchProvider, "TAGE provider for longest match"),
    ADD_STAT(altMatchProvider, "TAGE provider for alt match")
    longestMatchProvider.init(nHistoryTables + 1);
    altMatchProvider.init(nHistoryTables + 1);
// Accessor for a tagged entry's prediction counter (used by derived
// predictors / debug). NOTE(review): return-type line and braces are elided.
TAGEBase::getCtr(int hitBank, int hitBankIndex) const
return gtable[hitBank][hitBankIndex].ctr;
// Accessor for the tagged-table counter width.
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::getTageCtrBits() const
return tagTableCounterBits;
// Accessor for a thread's current path history.
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::getPathHist(ThreadID tid) const
return threadHistory[tid].pathHist;
// Whether histories are updated speculatively at predict time (vs. commit).
// NOTE(review): return-type line and braces are elided in this chunk.
TAGEBase::isSpeculativeUpdateEnabled() const
return speculativeHistUpdate;
// Estimate the predictor's total storage budget in bits: tagged tables
// (counter + u + tag per entry), chooser counters, bimodal prediction and
// shared hysteresis bits, plus the history registers themselves.
// NOTE(review): the return type, the declaration/initialization of `bits`,
// the return statement and the closing brace are elided in this chunk.
TAGEBase::getSizeInBits() const {
for (int i = 1; i <= nHistoryTables; i++) {
    bits += (1 << logTagTableSizes[i]) *
        (tagTableCounterBits + tagTableUBits + tagTableTagWidths[i]);
uint64_t bimodalTableSize = ULL(1) << logTagTableSizes[0];
bits += numUseAltOnNa * useAltOnNaBits;
bits += bimodalTableSize;                                  // prediction bits
bits += (bimodalTableSize >> logRatioBiModalHystEntries);  // shared hysteresis
bits += histLengths[nHistoryTables];                       // global history
bits += pathHistBits;
bits += logUResetPeriod;                                   // tCounter
// Params factory: instantiates the C++ predictor from the Python config
// object. NOTE(review): the return-type line and braces are elided in this
// chunk.
TAGEBaseParams::create()
return new TAGEBase(this);