/*
 * Copyright (c) 2015-2017 Advanced Micro Devices, Inc.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "gpu-compute/gpu_dyn_inst.hh"

#include "debug/GPUMem.hh"
#include "gpu-compute/gpu_static_inst.hh"
#include "gpu-compute/scalar_register_file.hh"
#include "gpu-compute/shader.hh"
#include "gpu-compute/wavefront.hh"
GPUDynInst::GPUDynInst(ComputeUnit *_cu, Wavefront *_wf,
                       GPUStaticInst *static_inst, InstSeqNum instSeqNum)
    : GPUExecContext(_cu, _wf), scalarAddr(0), addr(computeUnit()->wfSize(),
      (Addr)0), numScalarReqs(0), isSaveRestore(false),
      _staticInst(static_inst), _seqNum(instSeqNum)
{
    statusVector.assign(TheGpuISA::NumVecElemPerVecReg, 0);
    tlbHitLevel.assign(computeUnit()->wfSize(), -1);
    // vector instructions can have up to 4 source/destination operands
    d_data = new uint8_t[computeUnit()->wfSize() * 4 * sizeof(double)];
    a_data = new uint8_t[computeUnit()->wfSize() * 8];
    x_data = new uint8_t[computeUnit()->wfSize() * 8];
    // scalar loads can read up to 16 Dwords of data (see publicly
    // available GCN3 ISA manual)
    scalar_data = new uint8_t[16 * sizeof(uint32_t)];
    // zero-initialize all of the data buffers
    for (int i = 0; i < (16 * sizeof(uint32_t)); ++i) {
        scalar_data[i] = 0;
    }
    for (int i = 0; i < (computeUnit()->wfSize() * 8); ++i) {
        a_data[i] = 0;
        x_data[i] = 0;
    }
    for (int i = 0; i < (computeUnit()->wfSize() * 4 * sizeof(double)); ++i) {
        d_data[i] = 0;
    }

    wfDynId = _wf->wfDynId;
    kern_id = _wf->kernId;
    wfSlotId = _wf->wfSlotId;
}
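// Sizing of the buffers allocated above, worked through for a typical
// 64-lane wavefront (illustrative arithmetic, not new configuration):
// d_data holds up to 4 operands of up to 8 bytes (double) per lane,
// i.e. 64 * 4 * 8 = 2048 bytes; a_data and x_data hold one 8-byte value
// per lane (64 * 8 = 512 bytes); scalar_data holds the 16 Dwords
// (64 bytes) that the widest GCN3 scalar load, s_load_dwordx16, can
// return.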
GPUDynInst::~GPUDynInst()
{
    // release the per-instruction data buffers allocated in the
    // constructor
    delete[] d_data;
    delete[] a_data;
    delete[] x_data;
    delete[] scalar_data;
}
void
GPUDynInst::execute(GPUDynInstPtr gpuDynInst)
{
    _staticInst->execute(gpuDynInst);
}

int
GPUDynInst::numSrcRegOperands()
{
    return _staticInst->numSrcRegOperands();
}

int
GPUDynInst::numDstRegOperands()
{
    return _staticInst->numDstRegOperands();
}

int
GPUDynInst::numSrcVecOperands()
{
    return _staticInst->numSrcVecOperands();
}

int
GPUDynInst::numDstVecOperands()
{
    return _staticInst->numDstVecOperands();
}

int
GPUDynInst::numSrcVecDWORDs()
{
    return _staticInst->numSrcVecDWORDs();
}

int
GPUDynInst::numDstVecDWORDs()
{
    return _staticInst->numDstVecDWORDs();
}

int
GPUDynInst::numOpdDWORDs(int operandIdx)
{
    return _staticInst->numOpdDWORDs(operandIdx);
}

int
GPUDynInst::getNumOperands()
{
    return _staticInst->getNumOperands();
}

bool
GPUDynInst::isVectorRegister(int operandIdx)
{
    return _staticInst->isVectorRegister(operandIdx);
}

bool
GPUDynInst::isScalarRegister(int operandIdx)
{
    return _staticInst->isScalarRegister(operandIdx);
}

int
GPUDynInst::getRegisterIndex(int operandIdx, GPUDynInstPtr gpuDynInst)
{
    return _staticInst->getRegisterIndex(operandIdx, gpuDynInst);
}

int
GPUDynInst::getOperandSize(int operandIdx)
{
    return _staticInst->getOperandSize(operandIdx);
}

bool
GPUDynInst::isDstOperand(int operandIdx)
{
    return _staticInst->isDstOperand(operandIdx);
}

bool
GPUDynInst::isSrcOperand(int operandIdx)
{
    return _staticInst->isSrcOperand(operandIdx);
}
bool
GPUDynInst::hasSourceSgpr() const
{
    for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
        if (_staticInst->isScalarRegister(i) && _staticInst->isSrcOperand(i)) {
            return true;
        }
    }
    return false;
}

bool
GPUDynInst::hasSourceVgpr() const
{
    for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
        if (_staticInst->isVectorRegister(i) && _staticInst->isSrcOperand(i)) {
            return true;
        }
    }
    return false;
}

bool
GPUDynInst::hasDestinationSgpr() const
{
    for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
        if (_staticInst->isScalarRegister(i) && _staticInst->isDstOperand(i)) {
            return true;
        }
    }
    return false;
}

bool
GPUDynInst::srcIsVgpr(int index) const
{
    assert(index >= 0 && index < _staticInst->getNumOperands());
    if (_staticInst->isVectorRegister(index) &&
        _staticInst->isSrcOperand(index)) {
        return true;
    }
    return false;
}

bool
GPUDynInst::hasDestinationVgpr() const
{
    for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
        if (_staticInst->isVectorRegister(i) && _staticInst->isDstOperand(i)) {
            return true;
        }
    }
    return false;
}
bool
GPUDynInst::isOpcode(const std::string& opcodeStr,
                     const std::string& extStr) const
{
    return _staticInst->opcode().find(opcodeStr) != std::string::npos &&
           _staticInst->opcode().find(extStr) != std::string::npos;
}

bool
GPUDynInst::isOpcode(const std::string& opcodeStr) const
{
    return _staticInst->opcode().find(opcodeStr) != std::string::npos;
}
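// Both overloads above do simple substring matching on the mnemonic,
// so, for example (assuming a GCN3-style mnemonic), isOpcode(
// "flat_load", "dword") would match "flat_load_dword" as well as
// "flat_load_dwordx2"; callers that need an exact opcode must compare
// the full mnemonic themselves.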
const std::string&
GPUDynInst::disassemble() const
{
    return _staticInst->disassemble();
}

InstSeqNum
GPUDynInst::seqNum() const
{
    return _seqNum;
}

Enums::StorageClassType
GPUDynInst::executedAs()
{
    return _staticInst->executed_as;
}
bool
GPUDynInst::hasVgprRawDependence(GPUDynInstPtr s)
{
    assert(s);
    for (int i = 0; i < getNumOperands(); ++i) {
        if (isVectorRegister(i) && isSrcOperand(i)) {
            for (int j = 0; j < s->getNumOperands(); ++j) {
                if (s->isVectorRegister(j) && s->isDstOperand(j)) {
                    // RAW hazard: this instruction sources a VGPR
                    // that s writes
                    if (getRegisterIndex(i, s) == s->getRegisterIndex(j, s)) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}

bool
GPUDynInst::hasSgprRawDependence(GPUDynInstPtr s)
{
    assert(s);
    for (int i = 0; i < getNumOperands(); ++i) {
        if (isScalarRegister(i) && isSrcOperand(i)) {
            for (int j = 0; j < s->getNumOperands(); ++j) {
                if (s->isScalarRegister(j) && s->isDstOperand(j)) {
                    // RAW hazard: this instruction sources an SGPR
                    // that s writes
                    if (getRegisterIndex(i, s) == s->getRegisterIndex(j, s)) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}
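// Illustrative RAW example: if s is "v_add_f32 v2, v0, v1" (writes v2)
// and this instruction is "v_mul_f32 v3, v2, v4" (reads v2), one of
// our source VGPRs matches a destination VGPR of s, so
// hasVgprRawDependence(s) returns true; hasSgprRawDependence() is the
// scalar-register analogue.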
// Process a memory instruction and (if necessary) submit timing request
void
GPUDynInst::initiateAcc(GPUDynInstPtr gpuDynInst)
{
    DPRINTF(GPUMem, "CU%d: WF[%d][%d]: mempacket status bitvector=%#x\n",
            cu->cu_id, simdId, wfSlotId, exec_mask);

    _staticInst->initiateAcc(gpuDynInst);
}

void
GPUDynInst::completeAcc(GPUDynInstPtr gpuDynInst)
{
    DPRINTF(GPUMem, "CU%d: WF[%d][%d]: mempacket status bitvector="
            "%#x\n",
            cu->cu_id, simdId, wfSlotId, exec_mask);

    _staticInst->completeAcc(gpuDynInst);
}
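// Timing-mode memory instructions are split into two phases:
// initiateAcc() runs when the instruction issues and sends its memory
// request(s) into the pipeline, while completeAcc() runs once the
// response returns so the instruction can finish (e.g., write load
// data back to the register file). The exec_mask printed above records
// which lanes participate in the access.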
/**
 * accessor methods for the attributes of
 * the underlying GPU static instruction
 */
bool
GPUDynInst::isALU() const
{
    return _staticInst->isALU();
}

bool
GPUDynInst::isBranch() const
{
    return _staticInst->isBranch();
}

bool
GPUDynInst::isCondBranch() const
{
    return _staticInst->isCondBranch();
}

bool
GPUDynInst::isNop() const
{
    return _staticInst->isNop();
}

bool
GPUDynInst::isEndOfKernel() const
{
    return _staticInst->isEndOfKernel();
}

bool
GPUDynInst::isKernelLaunch() const
{
    return _staticInst->isKernelLaunch();
}

bool
GPUDynInst::isSDWAInst() const
{
    return _staticInst->isSDWAInst();
}

bool
GPUDynInst::isDPPInst() const
{
    return _staticInst->isDPPInst();
}

bool
GPUDynInst::isReturn() const
{
    return _staticInst->isReturn();
}

bool
GPUDynInst::isUnconditionalJump() const
{
    return _staticInst->isUnconditionalJump();
}

bool
GPUDynInst::isSpecialOp() const
{
    return _staticInst->isSpecialOp();
}

bool
GPUDynInst::isWaitcnt() const
{
    return _staticInst->isWaitcnt();
}

bool
GPUDynInst::isBarrier() const
{
    return _staticInst->isBarrier();
}

bool
GPUDynInst::isMemSync() const
{
    return _staticInst->isMemSync();
}

bool
GPUDynInst::isMemRef() const
{
    return _staticInst->isMemRef();
}

bool
GPUDynInst::isFlat() const
{
    return _staticInst->isFlat();
}

bool
GPUDynInst::isLoad() const
{
    return _staticInst->isLoad();
}

bool
GPUDynInst::isStore() const
{
    return _staticInst->isStore();
}

bool
GPUDynInst::isAtomic() const
{
    return _staticInst->isAtomic();
}

bool
GPUDynInst::isAtomicNoRet() const
{
    return _staticInst->isAtomicNoRet();
}

bool
GPUDynInst::isAtomicRet() const
{
    return _staticInst->isAtomicRet();
}

bool
GPUDynInst::isVector() const
{
    return !_staticInst->isScalar();
}

bool
GPUDynInst::isScalar() const
{
    return _staticInst->isScalar();
}

bool
GPUDynInst::readsSCC() const
{
    return _staticInst->readsSCC();
}

bool
GPUDynInst::writesSCC() const
{
    return _staticInst->writesSCC();
}

bool
GPUDynInst::readsVCC() const
{
    return _staticInst->readsVCC();
}

bool
GPUDynInst::writesVCC() const
{
    return _staticInst->writesVCC();
}

bool
GPUDynInst::readsMode() const
{
    return _staticInst->readsMode();
}

bool
GPUDynInst::writesMode() const
{
    return _staticInst->writesMode();
}

bool
GPUDynInst::readsEXEC() const
{
    return _staticInst->readsEXEC();
}

bool
GPUDynInst::writesEXEC() const
{
    return _staticInst->writesEXEC();
}

bool
GPUDynInst::ignoreExec() const
{
    return _staticInst->ignoreExec();
}
bool
GPUDynInst::writesExecMask() const
{
    // true if any destination operand is the EXEC mask register
    for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
        if (_staticInst->isDstOperand(i) &&
            _staticInst->isExecMaskRegister(i)) {
            return true;
        }
    }
    return false;
}

bool
GPUDynInst::readsExecMask() const
{
    // true if any source operand is the EXEC mask register
    for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
        if (_staticInst->isSrcOperand(i) &&
            _staticInst->isExecMaskRegister(i)) {
            return true;
        }
    }
    return false;
}
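// EXEC is the wavefront's lane-enable mask. The operand scans above
// catch instructions that name EXEC as an explicit operand (for
// example, GCN3's v_cmpx_* instructions write it), whereas the
// implicit use of EXEC to predicate ordinary vector instructions is
// reported by the readsEXEC()/writesEXEC() flags earlier in this file.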
bool
GPUDynInst::writesFlatScratch() const
{
    for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
        if (_staticInst->isScalarRegister(i) && _staticInst->isDstOperand(i)) {
            return _staticInst->isFlatScratchRegister(i);
        }
    }
    return false;
}

bool
GPUDynInst::readsFlatScratch() const
{
    for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
        if (_staticInst->isScalarRegister(i) && _staticInst->isSrcOperand(i)) {
            return _staticInst->isFlatScratchRegister(i);
        }
    }
    return false;
}
bool
GPUDynInst::isAtomicAnd() const
{
    return _staticInst->isAtomicAnd();
}

bool
GPUDynInst::isAtomicOr() const
{
    return _staticInst->isAtomicOr();
}

bool
GPUDynInst::isAtomicXor() const
{
    return _staticInst->isAtomicXor();
}

bool
GPUDynInst::isAtomicCAS() const
{
    return _staticInst->isAtomicCAS();
}

bool
GPUDynInst::isAtomicExch() const
{
    return _staticInst->isAtomicExch();
}

bool
GPUDynInst::isAtomicAdd() const
{
    return _staticInst->isAtomicAdd();
}

bool
GPUDynInst::isAtomicSub() const
{
    return _staticInst->isAtomicSub();
}

bool
GPUDynInst::isAtomicInc() const
{
    return _staticInst->isAtomicInc();
}

bool
GPUDynInst::isAtomicDec() const
{
    return _staticInst->isAtomicDec();
}

bool
GPUDynInst::isAtomicMax() const
{
    return _staticInst->isAtomicMax();
}

bool
GPUDynInst::isAtomicMin() const
{
    return _staticInst->isAtomicMin();
}

bool
GPUDynInst::isArgLoad() const
{
    return _staticInst->isArgLoad();
}

bool
GPUDynInst::isGlobalMem() const
{
    return _staticInst->isGlobalMem();
}

bool
GPUDynInst::isLocalMem() const
{
    return _staticInst->isLocalMem();
}

bool
GPUDynInst::isArgSeg() const
{
    return _staticInst->isArgSeg();
}

bool
GPUDynInst::isGlobalSeg() const
{
    return _staticInst->isGlobalSeg();
}

bool
GPUDynInst::isGroupSeg() const
{
    return _staticInst->isGroupSeg();
}

bool
GPUDynInst::isKernArgSeg() const
{
    return _staticInst->isKernArgSeg();
}

bool
GPUDynInst::isPrivateSeg() const
{
    return _staticInst->isPrivateSeg();
}

bool
GPUDynInst::isReadOnlySeg() const
{
    return _staticInst->isReadOnlySeg();
}

bool
GPUDynInst::isSpillSeg() const
{
    return _staticInst->isSpillSeg();
}

bool
GPUDynInst::isGloballyCoherent() const
{
    return _staticInst->isGloballyCoherent();
}

bool
GPUDynInst::isSystemCoherent() const
{
    return _staticInst->isSystemCoherent();
}

bool
GPUDynInst::isF16() const
{
    return _staticInst->isF16();
}

bool
GPUDynInst::isF32() const
{
    return _staticInst->isF32();
}

bool
GPUDynInst::isF64() const
{
    return _staticInst->isF64();
}

bool
GPUDynInst::isFMA() const
{
    return _staticInst->isFMA();
}

bool
GPUDynInst::isMAC() const
{
    return _staticInst->isMAC();
}

bool
GPUDynInst::isMAD() const
{
    return _staticInst->isMAD();
}
void
GPUDynInst::doApertureCheck(const VectorMask &mask)
{
    // find the segment of the first active address, after
    // that we check that all other active addresses also
    // fall within the same APE
    for (int lane = 0; lane < computeUnit()->wfSize(); ++lane) {
        if (mask[lane]) {
            if (computeUnit()->shader->isLdsApe(addr[lane])) {
                // group (LDS) segment
                staticInstruction()->executed_as = Enums::SC_GROUP;
                break;
            } else if (computeUnit()->shader->isScratchApe(addr[lane])) {
                // private (scratch) segment
                staticInstruction()->executed_as = Enums::SC_PRIVATE;
                break;
            } else if (computeUnit()->shader->isGpuVmApe(addr[lane])) {
                // we won't support GPUVM
                fatal("flat access is in GPUVM APE\n");
            } else if (bits(addr[lane], 63, 47) != 0x1FFFF &&
                       bits(addr[lane], 63, 47)) {
                // we are in the "hole", this is a memory violation
                fatal("flat access at addr %#x has a memory violation\n",
                      addr[lane]);
            } else {
                // global memory segment
                staticInstruction()->executed_as = Enums::SC_GLOBAL;
                break;
            }
        }
    }

    // we should have found the segment
    assert(executedAs() != Enums::SC_NONE);

    // flat accesses should not straddle multiple APEs so we
    // must check that all addresses fall within the same APE
    if (executedAs() == Enums::SC_GROUP) {
        for (int lane = 0; lane < computeUnit()->wfSize(); ++lane) {
            if (mask[lane]) {
                // if the first valid addr we found above was LDS,
                // all the rest should be
                assert(computeUnit()->shader->isLdsApe(addr[lane]));
            }
        }
    } else if (executedAs() == Enums::SC_PRIVATE) {
        for (int lane = 0; lane < computeUnit()->wfSize(); ++lane) {
            if (mask[lane]) {
                // if the first valid addr we found above was private,
                // all the rest should be
                assert(computeUnit()->shader->isScratchApe(addr[lane]));
            }
        }
    } else {
        for (int lane = 0; lane < computeUnit()->wfSize(); ++lane) {
            if (mask[lane]) {
                // if the first valid addr we found above was global,
                // all the rest should be. because we don't have an
                // explicit range of the global segment, we just make
                // sure that the address falls in no other APE and that
                // it is not a memory violation
                assert(!computeUnit()->shader->isLdsApe(addr[lane]));
                assert(!computeUnit()->shader->isScratchApe(addr[lane]));
                assert(!computeUnit()->shader->isGpuVmApe(addr[lane]));
                assert(!(bits(addr[lane], 63, 47) != 0x1FFFF
                       && bits(addr[lane], 63, 47)));
            }
        }
    }
}
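// Sketch of the flat address map the checks above assume: LDS and
// scratch accesses are recognized by small, driver-configured aperture
// windows, GPUVM accesses by their own APE, and everything else must be
// a canonical x86-64 address. bits(addr, 63, 47) is 0 for low canonical
// addresses and 0x1FFFF for high ones; any other value lies in the
// non-canonical "hole". For example, addr 0x0001000000000000 gives
// bits(63, 47) == 0x2, so a flat access there is flagged as a memory
// violation.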
void
GPUDynInst::resolveFlatSegment(const VectorMask &mask)
{
    doApertureCheck(mask);

    // Now that we know the aperture, do the following:
    // 1. Transform the flat address to its segmented equivalent.
    // 2. Set the execUnitId based on the aperture check.
    // 3. Decrement any extra resources that were reserved. Other
    //    resources are released as normal, below.
    if (executedAs() == Enums::SC_GLOBAL) {
        // no transformation for global segment
        wavefront()->execUnitId = wavefront()->flatGmUnitId;
        if (isLoad()) {
            wavefront()->rdLmReqsInPipe--;
        } else if (isStore()) {
            wavefront()->wrLmReqsInPipe--;
        } else if (isAtomic() || isMemSync()) {
            wavefront()->wrLmReqsInPipe--;
            wavefront()->rdLmReqsInPipe--;
        } else {
            panic("Invalid memory operation!\n");
        }
    } else if (executedAs() == Enums::SC_GROUP) {
        for (int lane = 0; lane < wavefront()->computeUnit->wfSize(); ++lane) {
            if (mask[lane]) {
                // flat address calculation goes here.
                // addr[lane] = segmented address
                panic("Flat group memory operation is unimplemented!\n");
            }
        }
        wavefront()->execUnitId = wavefront()->flatLmUnitId;
        wavefront()->decVMemInstsIssued();
        if (isLoad()) {
            wavefront()->rdGmReqsInPipe--;
        } else if (isStore()) {
            wavefront()->wrGmReqsInPipe--;
        } else if (isAtomic() || isMemSync()) {
            wavefront()->rdGmReqsInPipe--;
            wavefront()->wrGmReqsInPipe--;
        } else {
            panic("Invalid memory operation!\n");
        }
    } else if (executedAs() == Enums::SC_PRIVATE) {
        /**
         * Flat instructions may resolve to the private segment (scratch),
         * which is backed by main memory and provides per-lane scratch
         * memory. Flat addressing uses apertures - registers that specify
         * the address range in the VA space where LDS/private memory is
         * mapped, whose values are set by the kernel mode driver.
         * These apertures use addresses that are not used by x86 CPUs.
         * When the address of a Flat operation falls into one of the
         * apertures, the Flat operation is redirected to either LDS or
         * to the private memory segment.
         *
         * For private memory the SW runtime will allocate some space in
         * the VA space for each AQL queue, whose base address is stored
         * in scalar registers per the AMD GPU ABI. The amd_queue_t
         * scratch_backing_memory_location provides the base address in
         * memory for the queue's private segment. Various other fields
         * loaded into register state during kernel launch specify per-WF
         * and per-work-item offsets so that individual lanes may access
         * their private segment allocation.
         *
         * For more details about flat addressing see:
         * http://rocm-documentation.readthedocs.io/en/latest/
         * ROCm_Compiler_SDK/ROCm-Native-ISA.html#flat-scratch
         *
         * https://github.com/ROCm-Developer-Tools/
         * ROCm-ComputeABI-Doc/blob/master/AMDGPU-ABI.md
         */
        // the scratch offset and per-lane scratch size are read from
        // SGPRs near the top of the wave's SGPR allocation
        uint32_t numSgprs = wavefront()->maxSgprs;
        uint32_t physSgprIdx =
            wavefront()->computeUnit->registerManager->mapSgpr(wavefront(),
                numSgprs - 3);
        uint32_t offset =
            wavefront()->computeUnit->srf[simdId]->read(physSgprIdx);
        physSgprIdx =
            wavefront()->computeUnit->registerManager->mapSgpr(wavefront(),
                numSgprs - 4);
        uint32_t size =
            wavefront()->computeUnit->srf[simdId]->read(physSgprIdx);
        for (int lane = 0; lane < wavefront()->computeUnit->wfSize(); ++lane) {
            if (mask[lane]) {
                addr[lane] = addr[lane] + lane * size + offset +
                    wavefront()->computeUnit->shader->getHiddenPrivateBase() -
                    wavefront()->computeUnit->shader->getScratchBase();
            }
        }
        wavefront()->execUnitId = wavefront()->flatLmUnitId;
        wavefront()->decLGKMInstsIssued();
        if (isLoad()) {
            wavefront()->rdGmReqsInPipe--;
        } else if (isStore()) {
            wavefront()->wrGmReqsInPipe--;
        } else if (isAtomic() || isMemSync()) {
            wavefront()->rdGmReqsInPipe--;
            wavefront()->wrGmReqsInPipe--;
        } else {
            panic("Invalid memory operation!\n");
        }
    } else {
        for (int lane = 0; lane < wavefront()->computeUnit->wfSize(); ++lane) {
            if (mask[lane]) {
                panic("flat addr %#llx maps to bad segment %d\n",
                      addr[lane], executedAs());
            }
        }
    }
}
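// Worked example of the scratch address transform above, using
// illustrative values: with a per-lane scratch size of 0x400 bytes and
// offset 0x8000, lane 2 accessing flat address A is redirected to
// A + 2 * 0x400 + 0x8000 + getHiddenPrivateBase() - getScratchBase(),
// i.e. the aperture-relative address is rebased into the queue's
// scratch backing memory, with each lane swizzled to its own per-lane
// region.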
TheGpuISA::ScalarRegU32
GPUDynInst::srcLiteral() const
{
    return _staticInst->srcLiteral();
}
void
GPUDynInst::updateStats()
{
    if (_staticInst->isLocalMem()) {
        // access to LDS (shared) memory
        cu->stats.dynamicLMemInstrCnt++;
    } else if (_staticInst->isFlat()) {
        cu->stats.dynamicFlatMemInstrCnt++;
    } else {
        // access to global memory

        // update PageDivergence histogram
        int number_pages_touched = cu->pagesTouched.size();
        assert(number_pages_touched);
        cu->stats.pageDivergenceDist.sample(number_pages_touched);

        std::pair<ComputeUnit::pageDataStruct::iterator, bool> ret;

        for (auto it : cu->pagesTouched) {
            // see if this page has been touched before. if not, this also
            // inserts the page into the table.
            ret = cu->pageAccesses
                .insert(ComputeUnit::pageDataStruct::value_type(it.first,
                        std::make_pair(1, it.second)));

            // if the page was already in the table, update its stats
            if (!ret.second) {
                ret.first->second.first++;
                ret.first->second.second += it.second;
            }
        }

        cu->pagesTouched.clear();

        // total number of memory instructions (dynamic)
        // Atomics are counted as a single memory instruction.
        // this is # memory instructions per wavefront, not per work-item
        cu->stats.dynamicGMemInstrCnt++;
    }
}
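// Example of the page-divergence bookkeeping above (illustrative
// numbers): if one wavefront's global load touches pages P0, P0, and
// P1 across its lanes, pagesTouched holds two entries, the histogram
// records a sample of 2, and pageAccesses either inserts each page
// with a count of 1 or bumps the counts of pages already seen by
// earlier instructions.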
void
GPUDynInst::profileRoundTripTime(Tick currentTime, int hopId)
{
    // Only take the first measurement in the case of coalescing
    if (roundTripTime.size() > hopId)
        return;

    roundTripTime.push_back(currentTime);
}
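// profileRoundTripTime() above and profileLineAddressTime() below each
// record one timestamp per "hop" of a memory request's journey, indexed
// by a caller-assigned hopId (e.g. issue, cache access, response).
// Because coalesced accesses can report the same hop more than once,
// only the first timestamp per hop is kept.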
void
GPUDynInst::profileLineAddressTime(Addr addr, Tick currentTime, int hopId)
{
    if (lineAddressTime.count(addr)) {
        if (lineAddressTime[addr].size() > hopId) {
            return;
        }

        lineAddressTime[addr].push_back(currentTime);
    } else if (hopId == 0) {
        auto addressTimeVec = std::vector<Tick> { currentTime };
        lineAddressTime.insert(std::make_pair(addr, addressTimeVec));
    }
}