src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 * Dave Greene
30 * Nathan Binkert
31 * Steve Reinhardt
32 * Ron Dreslinski
33 */
34
35 /**
36 * @file
37 * Cache definitions.
38 */
39
40 #include "sim/host.hh"
41 #include "base/misc.hh"
42
43 #include "mem/cache/cache.hh"
44 #include "mem/cache/cache_blk.hh"
45 #include "mem/cache/miss/mshr.hh"
46 #include "mem/cache/prefetch/base_prefetcher.hh"
47
48 #include "sim/sim_exit.hh" // for SimExitEvent
49
50
51 template<class TagStore>
52 Cache<TagStore>::Cache(const std::string &_name,
53 Cache<TagStore>::Params &params)
54 : BaseCache(_name, params.baseParams),
55 prefetchAccess(params.prefetchAccess),
56 tags(params.tags),
57 prefetcher(params.prefetcher),
58 doFastWrites(params.doFastWrites),
59 prefetchMiss(params.prefetchMiss)
60 {
61 tempBlock = new BlkType();
62 tempBlock->data = new uint8_t[blkSize];
63
64 cpuSidePort = new CpuSidePort(_name + "-cpu_side_port", this);
65 memSidePort = new MemSidePort(_name + "-mem_side_port", this);
66 cpuSidePort->setOtherPort(memSidePort);
67 memSidePort->setOtherPort(cpuSidePort);
68
69 tags->setCache(this);
70 prefetcher->setCache(this);
71 }
72
73 template<class TagStore>
74 void
75 Cache<TagStore>::regStats()
76 {
77 BaseCache::regStats();
78 tags->regStats(name());
79 prefetcher->regStats(name());
80 }
81
82 template<class TagStore>
83 Port *
84 Cache<TagStore>::getPort(const std::string &if_name, int idx)
85 {
86 if (if_name == "" || if_name == "cpu_side") {
87 return cpuSidePort;
88 } else if (if_name == "mem_side") {
89 return memSidePort;
90 } else if (if_name == "functional") {
91 return new CpuSidePort(name() + "-cpu_side_funcport", this);
92 } else {
93 panic("Port name %s unrecognized\n", if_name);
94 }
95 }
96
97 template<class TagStore>
98 void
99 Cache<TagStore>::deletePortRefs(Port *p)
100 {
101 if (cpuSidePort == p || memSidePort == p)
102 panic("Can only delete functional ports\n");
103
104 delete p;
105 }
106
107
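// Perform the data manipulation for a SwapReq packet: return the current
// block contents in the packet and, unless a conditional swap's comparison
// fails, write the packet's value into the block.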
108 template<class TagStore>
109 void
110 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
111 {
112 uint64_t overwrite_val;
113 bool overwrite_mem;
114 uint64_t condition_val64;
115 uint32_t condition_val32;
116
117 int offset = tags->extractBlkOffset(pkt->getAddr());
118 uint8_t *blk_data = blk->data + offset;
119
120 assert(sizeof(uint64_t) >= pkt->getSize());
121
122 overwrite_mem = true;
123 // keep a copy of our possible write value, and copy what is at the
124 // memory address into the packet
125 pkt->writeData((uint8_t *)&overwrite_val);
126 pkt->setData(blk_data);
127
128 if (pkt->req->isCondSwap()) {
129 if (pkt->getSize() == sizeof(uint64_t)) {
130 condition_val64 = pkt->req->getExtraData();
131 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
132 sizeof(uint64_t));
133 } else if (pkt->getSize() == sizeof(uint32_t)) {
134 condition_val32 = (uint32_t)pkt->req->getExtraData();
135 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
136 sizeof(uint32_t));
137 } else
138 panic("Invalid size for conditional read/write\n");
139 }
140
141 if (overwrite_mem)
142 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
143 }
144
145
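// Satisfy a CPU-side request from the given block: perform the read, write,
// swap, or upgrade and update the block's dirty/shared/writable state as
// needed.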
146 template<class TagStore>
147 void
148 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
149 {
150 assert(blk);
151 assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
152 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
153
154 // Check RMW operations first since both isRead() and
155 // isWrite() will be true for them
156 if (pkt->cmd == MemCmd::SwapReq) {
157 cmpAndSwap(blk, pkt);
158 } else if (pkt->isWrite()) {
159 if (blk->checkWrite(pkt)) {
160 blk->status |= BlkDirty;
161 pkt->writeDataToBlock(blk->data, blkSize);
162 }
163 } else if (pkt->isRead()) {
164 if (pkt->isLocked()) {
165 blk->trackLoadLocked(pkt);
166 }
167 pkt->setDataFromBlock(blk->data, blkSize);
168 if (pkt->getSize() == blkSize) {
169 // special handling for coherent block requests from
170 // upper-level caches
171 if (pkt->needsExclusive()) {
172 // on ReadExReq we give up our copy
173 tags->invalidateBlk(blk);
174 } else {
175 // on ReadReq we create shareable copies here and in
176 // the requester
177 pkt->assertShared();
178 blk->status &= ~BlkWritable;
179 }
180 }
181 } else {
182 // Not a read or write... must be an upgrade. it's OK
183 // to just ack those as long as we have an exclusive
184 // copy at this level.
185 assert(pkt->cmd == MemCmd::UpgradeReq);
186 tags->invalidateBlk(blk);
187 }
188 }
189
190
191 /////////////////////////////////////////////////////
192 //
193 // MSHR helper functions
194 //
195 /////////////////////////////////////////////////////
196
197
198 template<class TagStore>
199 void
200 Cache<TagStore>::markInService(MSHR *mshr)
201 {
202 markInServiceInternal(mshr);
203 #if 0
204 if (mshr->originalCmd == MemCmd::HardPFReq) {
205 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
206 name());
207 //Also clear pending if need be
208 if (!prefetcher->havePending())
209 {
210 deassertMemSideBusRequest(Request_PF);
211 }
212 }
213 #endif
214 }
215
216
217 template<class TagStore>
218 void
219 Cache<TagStore>::squash(int threadNum)
220 {
221 bool unblock = false;
222 BlockedCause cause = NUM_BLOCKED_CAUSES;
223
224 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
225 noTargetMSHR = NULL;
226 unblock = true;
227 cause = Blocked_NoTargets;
228 }
229 if (mshrQueue.isFull()) {
230 unblock = true;
231 cause = Blocked_NoMSHRs;
232 }
233 mshrQueue.squash(threadNum);
234 if (unblock && !mshrQueue.isFull()) {
235 clearBlocked(cause);
236 }
237 }
238
239 /////////////////////////////////////////////////////
240 //
241 // Access path: requests coming in from the CPU side
242 //
243 /////////////////////////////////////////////////////
244
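// Look up the block for this request and try to satisfy it in place.
// Returns true on success; the matching block (if any) and the tag lookup
// latency are returned through the reference arguments.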
245 template<class TagStore>
246 bool
247 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, int &lat)
248 {
249 if (pkt->req->isUncacheable()) {
250 blk = NULL;
251 lat = hitLatency;
252 return false;
253 }
254
255 bool satisfied = false; // assume the worst
256 blk = tags->findBlock(pkt->getAddr(), lat);
257
258 if (prefetchAccess) {
259 //We are determining prefetches on access stream, call prefetcher
260 prefetcher->handleMiss(pkt, curTick);
261 }
262
263 DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), pkt->getAddr(),
264 (blk) ? "hit" : "miss");
265
266 if (blk != NULL) {
267 // HIT
268 if (blk->isPrefetch()) {
269 //Signal that this was a hit under prefetch; no separate check is
270 //needed since we can only get here if the block was prefetched
271 DPRINTF(HWPrefetch, "Hit a block that was prefetched\n");
272 blk->status &= ~BlkHWPrefetched;
273 if (prefetchMiss) {
274 //If we are using the miss stream, signal the
275 //prefetcher otherwise the access stream would have
276 //already signaled this hit
277 prefetcher->handleMiss(pkt, curTick);
278 }
279 }
280
281 if (pkt->needsExclusive() ? blk->isWritable() : blk->isValid()) {
282 // OK to satisfy access
283 hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
284 satisfied = true;
285 satisfyCpuSideRequest(pkt, blk);
286 } else if (pkt->cmd == MemCmd::Writeback) {
287 // special case: writeback to read-only block (e.g., from
288 // L1 into L2). since we're really just passing ownership
289 // from one cache to another, we can update this cache to
290 // be the owner without making the block writeable
291 assert(!blk->isWritable() /* && !blk->isDirty() */);
292 assert(blkSize == pkt->getSize());
293 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
294 blk->status |= BlkDirty;
295 satisfied = true;
296 // nothing else to do; writeback doesn't expect response
297 assert(!pkt->needsResponse());
298 } else {
299 // permission violation... nothing to do here, leave unsatisfied
300 // for statistics purposes this counts like a complete miss
301 incMissCount(pkt);
302 }
303 } else {
304 // complete miss (no matching block)
305 incMissCount(pkt);
306
307 if (pkt->isLocked() && pkt->isWrite()) {
308 // miss on store conditional... just give up now
309 pkt->req->setExtraData(0);
310 satisfied = true;
311 }
312 }
313
314 return satisfied;
315 }
316
317
318 class ForwardResponseRecord : public Packet::SenderState
319 {
320 Packet::SenderState *prevSenderState;
321 int prevSrc;
322 #ifndef NDEBUG
323 BaseCache *cache;
324 #endif
325 public:
326 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
327 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
328 #ifndef NDEBUG
329 , cache(_cache)
330 #endif
331 {}
332 void restore(Packet *pkt, BaseCache *_cache)
333 {
334 assert(_cache == cache);
335 pkt->senderState = prevSenderState;
336 pkt->setDest(prevSrc);
337 }
338 };
339
340
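// Handle a timing-mode packet arriving on the CPU side: responses being
// forwarded downward, uncacheable accesses, hits (which respond after
// hitLatency), and misses (which allocate an MSHR or write buffer entry).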
341 template<class TagStore>
342 bool
343 Cache<TagStore>::timingAccess(PacketPtr pkt)
344 {
345 //@todo Add back in MemDebug Calls
346 // MemDebug::cacheAccess(pkt);
347
348 // we charge hitLatency for doing just about anything here
349 Tick time = curTick + hitLatency;
350
351 if (pkt->isResponse()) {
352 // must be cache-to-cache response from upper to lower level
353 ForwardResponseRecord *rec =
354 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
355 assert(rec != NULL);
356 rec->restore(pkt, this);
357 delete rec;
358 memSidePort->respond(pkt, time);
359 return true;
360 }
361
362 assert(pkt->isRequest());
363
364 if (pkt->memInhibitAsserted()) {
365 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
366 pkt->getAddr());
367 assert(!pkt->req->isUncacheable());
368 return true;
369 }
370
371 if (pkt->req->isUncacheable()) {
372 // writes go in write buffer, reads use MSHR
373 if (pkt->isWrite() && !pkt->isRead()) {
374 allocateWriteBuffer(pkt, time, true);
375 } else {
376 allocateUncachedReadBuffer(pkt, time, true);
377 }
378 assert(pkt->needsResponse()); // else we should delete it here??
379 return true;
380 }
381
382 int lat = hitLatency;
383 bool satisfied = false;
384
385 Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
386 MSHR *mshr = mshrQueue.findMatch(blk_addr);
387
388 if (!mshr) {
389 // no outstanding access to this block, look up in cache
390 // (otherwise if we allow reads while there's an outstanding
391 // write miss, the read could return stale data out of the
392 // cache block... a more aggressive system could detect the
393 // overlap (if any) and forward data out of the MSHRs, but we
394 // don't do that yet)
395 BlkType *blk = NULL;
396 satisfied = access(pkt, blk, lat);
397 }
398
399 #if 0
400 PacketList writebacks;
401
402 // If this is a block size write/hint (WH64) allocate the block here
403 // if the coherence protocol allows it.
404 /** @todo make the fast write alloc (wh64) work with coherence. */
405 /** @todo Do we want to do fast writes for writebacks as well? */
406 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
407 (pkt->cmd == MemCmd::WriteReq
408 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
409 // not outstanding misses, can do this
410 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
411 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
412 if (outstanding_miss) {
413 warn("WriteInv doing a fastallocate "
414 "with an outstanding miss to the same address\n");
415 }
416 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
417 writebacks);
418 ++fastWrites;
419 }
420 }
421
422 // copy writebacks to write buffer
423 while (!writebacks.empty()) {
424 PacketPtr wbPkt = writebacks.front();
425 allocateWriteBuffer(wbPkt, time, true);
426 writebacks.pop_front();
427 }
428 #endif
429
430 bool needsResponse = pkt->needsResponse();
431
432 if (satisfied) {
433 if (needsResponse) {
434 pkt->makeTimingResponse();
435 cpuSidePort->respond(pkt, curTick+lat);
436 } else {
437 delete pkt;
438 }
439 } else {
440 // miss
441 if (prefetchMiss)
442 prefetcher->handleMiss(pkt, time);
443
444 if (mshr) {
445 // MSHR hit
446 //@todo remove hw_pf here
447 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
448 if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
449 mshr->threadNum = -1;
450 }
451 mshr->allocateTarget(pkt, time, order++);
452 if (mshr->getNumTargets() == numTarget) {
453 noTargetMSHR = mshr;
454 setBlocked(Blocked_NoTargets);
455 // need to be careful with this... if this mshr isn't
456 // ready yet (i.e. time > curTick), we don't want to
457 // move it ahead of mshrs that are ready
458 // mshrQueue.moveToFront(mshr);
459 }
460 } else {
461 // no MSHR
462 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
463 // always mark as cache fill for now... if we implement
464 // no-write-allocate or bypass accesses this will have to
465 // be changed.
466 if (pkt->cmd == MemCmd::Writeback) {
467 allocateWriteBuffer(pkt, time, true);
468 } else {
469 allocateMissBuffer(pkt, time, true);
470 }
471 }
472 }
473
474 return true;
475 }
476
477
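// Build the packet to send toward memory for a miss. Returns NULL when the
// original packet should just be forwarded (uncacheable accesses, and
// writebacks or upgrades that miss entirely); otherwise an Upgrade, ReadEx,
// or Read request for the full block.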
478 template<class TagStore>
479 PacketPtr
480 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
481 bool needsExclusive)
482 {
483 bool blkValid = blk && blk->isValid();
484
485 if (cpu_pkt->req->isUncacheable()) {
486 assert(blk == NULL);
487 return NULL;
488 }
489
490 if (!blkValid &&
491 (cpu_pkt->cmd == MemCmd::Writeback ||
492 cpu_pkt->cmd == MemCmd::UpgradeReq)) {
493 // For now, writebacks from upper-level caches that
494 // completely miss in the cache just go through. If we had
495 // "fast write" support (where we could write the whole
496 // block w/o fetching new data) we might want to allocate
497 // on writeback misses instead.
498 return NULL;
499 }
500
501 assert(cpu_pkt->needsResponse());
502
503 MemCmd cmd;
504 // @TODO make useUpgrades a parameter.
505 // Note that ownership protocols require upgrade, otherwise a
506 // write miss on a shared owned block will generate a ReadExcl,
507 // which will clobber the owned copy.
508 const bool useUpgrades = true;
509 if (blkValid && useUpgrades) {
510 // only reason to be here is that blk is shared
511 // (read-only) and we need exclusive
512 assert(needsExclusive && !blk->isWritable());
513 cmd = MemCmd::UpgradeReq;
514 } else {
515 // block is invalid
516 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
517 }
518 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
519
520 pkt->allocate();
521 return pkt;
522 }
523
524
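// Handle an atomic-mode access: satisfy it from the cache if possible,
// otherwise fetch or forward via sendAtomic and perform any resulting
// writebacks atomically. Returns the accumulated latency.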
525 template<class TagStore>
526 Tick
527 Cache<TagStore>::atomicAccess(PacketPtr pkt)
528 {
529 int lat = hitLatency;
530
531 // @TODO: make this a parameter
532 bool last_level_cache = false;
533
534 if (pkt->memInhibitAsserted()) {
535 assert(!pkt->req->isUncacheable());
536 // have to invalidate ourselves and any lower caches even if
537 // upper cache will be responding
538 if (pkt->isInvalidate()) {
539 BlkType *blk = tags->findBlock(pkt->getAddr());
540 if (blk && blk->isValid()) {
541 tags->invalidateBlk(blk);
542 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
543 pkt->cmdString(), pkt->getAddr());
544 }
545 if (!last_level_cache) {
546 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
547 pkt->cmdString(), pkt->getAddr());
548 lat += memSidePort->sendAtomic(pkt);
549 }
550 } else {
551 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
552 pkt->cmdString(), pkt->getAddr());
553 }
554
555 return lat;
556 }
557
558 // should assert here that there are no outstanding MSHRs or
559 // writebacks... that would mean that someone used an atomic
560 // access in timing mode
561
562 BlkType *blk = NULL;
563
564 if (!access(pkt, blk, lat)) {
565 // MISS
566 PacketPtr busPkt = getBusPacket(pkt, blk, pkt->needsExclusive());
567
568 bool isCacheFill = (busPkt != NULL);
569
570 if (busPkt == NULL) {
571 // just forwarding the same request to the next level
572 // no local cache operation involved
573 busPkt = pkt;
574 }
575
576 DPRINTF(Cache, "Sending an atomic %s for %x\n",
577 busPkt->cmdString(), busPkt->getAddr());
578
579 #if TRACING_ON
580 CacheBlk::State old_state = blk ? blk->status : 0;
581 #endif
582
583 lat += memSidePort->sendAtomic(busPkt);
584
585 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
586 busPkt->cmdString(), busPkt->getAddr(), old_state);
587
588 if (isCacheFill) {
589 PacketList writebacks;
590 blk = handleFill(busPkt, blk, writebacks);
591 satisfyCpuSideRequest(pkt, blk);
592 delete busPkt;
593
594 // Handle writebacks if needed
595 while (!writebacks.empty()){
596 PacketPtr wbPkt = writebacks.front();
597 memSidePort->sendAtomic(wbPkt);
598 writebacks.pop_front();
599 delete wbPkt;
600 }
601 }
602 }
603
604 // We now have the block one way or another (hit or completed miss)
605
606 if (pkt->needsResponse()) {
607 pkt->makeAtomicResponse();
608 }
609
610 return lat;
611 }
612
613
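// Service a functional access: check the cache block first, then any
// matching outstanding MSHR targets, and finally forward the request to the
// port on the other side if it is still unsatisfied.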
614 template<class TagStore>
615 void
616 Cache<TagStore>::functionalAccess(PacketPtr pkt,
617 CachePort *otherSidePort)
618 {
619 Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
620 BlkType *blk = tags->findBlock(pkt->getAddr());
621
622 if (blk && pkt->checkFunctional(blk_addr, blkSize, blk->data)) {
623 // request satisfied from block
624 return;
625 }
626
627 // Need to check for outstanding misses and writes
628
629 // There can only be one matching outstanding miss.
630 MSHR *mshr = mshrQueue.findMatch(blk_addr);
631 if (mshr) {
632 MSHR::TargetList *targets = mshr->getTargetList();
633 MSHR::TargetList::iterator i = targets->begin();
634 MSHR::TargetList::iterator end = targets->end();
635 for (; i != end; ++i) {
636 PacketPtr targetPkt = i->pkt;
637 if (pkt->checkFunctional(targetPkt))
638 return;
639 }
640 }
641
642 // There can be many matching outstanding writes.
643 std::vector<MSHR*> writes;
644 assert(!writeBuffer.findMatches(blk_addr, writes));
645 /* Need to change this to iterate through targets in mshr??
646 for (int i = 0; i < writes.size(); ++i) {
647 MSHR *mshr = writes[i];
648 if (pkt->checkFunctional(mshr->addr, mshr->size, mshr->writeData))
649 return;
650 }
651 */
652
653 otherSidePort->checkAndSendFunctional(pkt);
654 }
655
656
657 /////////////////////////////////////////////////////
658 //
659 // Response handling: responses from the memory side
660 //
661 /////////////////////////////////////////////////////
662
663
664 template<class TagStore>
665 void
666 Cache<TagStore>::handleResponse(PacketPtr pkt)
667 {
668 Tick time = curTick + hitLatency;
669 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
670 assert(mshr);
671
672 if (pkt->wasNacked()) {
673 //pkt->reinitFromRequest();
674 warn("NACKs from devices not connected to the same bus "
675 "not implemented\n");
676 return;
677 }
678 assert(!pkt->isError());
679 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
680
681 MSHRQueue *mq = mshr->queue;
682 bool wasFull = mq->isFull();
683
684 if (mshr == noTargetMSHR) {
685 // we always clear at least one target
686 clearBlocked(Blocked_NoTargets);
687 noTargetMSHR = NULL;
688 }
689
690 // Initial target is used just for stats
691 MSHR::Target *initial_tgt = mshr->getTarget();
692 BlkType *blk = tags->findBlock(pkt->getAddr());
693 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
694 Tick miss_latency = curTick - initial_tgt->recvTime;
695 PacketList writebacks;
696
697 if (pkt->req->isUncacheable()) {
698 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
699 miss_latency;
700 } else {
701 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
702 miss_latency;
703 }
704
705 if (mshr->isCacheFill) {
706 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
707 pkt->getAddr());
708
709 // give mshr a chance to do some dirty work
710 mshr->handleFill(pkt, blk);
711
712 blk = handleFill(pkt, blk, writebacks);
713 assert(blk != NULL);
714 }
715
716 // First offset for critical word first calculations
717 int initial_offset = 0;
718
719 if (mshr->hasTargets()) {
720 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
721 }
722
723 while (mshr->hasTargets()) {
724 MSHR::Target *target = mshr->getTarget();
725
726 if (target->isCpuSide()) {
727 Tick completion_time;
728 if (blk != NULL) {
729 satisfyCpuSideRequest(target->pkt, blk);
730 // How many bytes past the first request is this one
731 int transfer_offset =
732 target->pkt->getOffset(blkSize) - initial_offset;
733 if (transfer_offset < 0) {
734 transfer_offset += blkSize;
735 }
736
737 // If critical word (no offset) return first word time
738 completion_time = tags->getHitLatency() +
739 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
740
741 if (!target->pkt->req->isUncacheable()) {
742 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
743 completion_time - target->recvTime;
744 }
745 } else {
746 // not a cache fill, just forwarding response
747 completion_time = tags->getHitLatency() + pkt->finishTime;
748 if (pkt->isRead()) {
749 target->pkt->setData(pkt->getPtr<uint8_t>());
750 }
751 }
752 target->pkt->makeTimingResponse();
753 cpuSidePort->respond(target->pkt, completion_time);
754 } else {
755 // response to snoop request
756 DPRINTF(Cache, "processing deferred snoop...\n");
757 handleSnoop(target->pkt, blk, true, true);
758 }
759
760 mshr->popTarget();
761 }
762
763 if (mshr->promoteDeferredTargets()) {
764 MSHRQueue *mq = mshr->queue;
765 mq->markPending(mshr);
766 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
767 } else {
768 mq->deallocate(mshr);
769 if (wasFull && !mq->isFull()) {
770 clearBlocked((BlockedCause)mq->index);
771 }
772 }
773
774 // copy writebacks to write buffer
775 while (!writebacks.empty()) {
776 PacketPtr wbPkt = writebacks.front();
777 allocateWriteBuffer(wbPkt, time, true);
778 writebacks.pop_front();
779 }
780 // if we used temp block, clear it out
781 if (blk == tempBlock) {
782 if (blk->isDirty()) {
783 allocateWriteBuffer(writebackBlk(blk), time, true);
784 }
785 tags->invalidateBlk(blk);
786 }
787
788 delete pkt;
789 }
790
791
792
793
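// Create a Writeback packet for the given dirty block. The block is marked
// clean here; the caller is responsible for actually sending the packet.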
794 template<class TagStore>
795 PacketPtr
796 Cache<TagStore>::writebackBlk(BlkType *blk)
797 {
798 assert(blk && blk->isValid() && blk->isDirty());
799
800 writebacks[0/*pkt->req->getThreadNum()*/]++;
801
802 Request *writebackReq =
803 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
804 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
805 writeback->allocate();
806 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
807
808 blk->status &= ~BlkDirty;
809 return writeback;
810 }
811
812
813 // Note that the reason we return a list of writebacks rather than
814 // inserting them directly in the write buffer is that this function
815 // is called by both atomic and timing-mode accesses, and in atomic
816 // mode we don't mess with the write buffer (we just perform the
817 // writebacks atomically once the original request is complete).
818 template<class TagStore>
819 typename Cache<TagStore>::BlkType*
820 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
821 PacketList &writebacks)
822 {
823 Addr addr = pkt->getAddr();
824 #if TRACING_ON
825 CacheBlk::State old_state = blk ? blk->status : 0;
826 #endif
827
828 if (blk == NULL) {
829 // better have read new data...
830 assert(pkt->isRead());
831
832 // need to do a replacement
833 blk = tags->findReplacement(addr, writebacks);
834 if (blk->isValid()) {
835 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
836 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
837 if (repl_mshr) {
838 // must be an outstanding upgrade request on block
839 // we're about to replace...
840 assert(!blk->isWritable());
841 assert(repl_mshr->needsExclusive());
842 // too hard to replace block with transient state;
843 // just use temporary storage to complete the current
844 // request and then get rid of it
845 assert(!tempBlock->isValid());
846 blk = tempBlock;
847 tempBlock->set = tags->extractSet(addr);
848 DPRINTF(Cache, "using temp block for %x\n", addr);
849 } else {
850 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
851 repl_addr, addr,
852 blk->isDirty() ? "writeback" : "clean");
853
854 if (blk->isDirty()) {
855 // Save writeback packet for handling by caller
856 writebacks.push_back(writebackBlk(blk));
857 }
858 }
859 }
860
861 blk->tag = tags->extractTag(addr);
862 } else {
863 // existing block... probably an upgrade
864 assert(blk->tag == tags->extractTag(addr));
865 // either we're getting new data or the block should already be valid
866 assert(pkt->isRead() || blk->isValid());
867 }
868
869 if (pkt->needsExclusive() || !pkt->sharedAsserted()) {
870 blk->status = BlkValid | BlkWritable;
871 } else {
872 blk->status = BlkValid;
873 }
874
875 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
876 addr, old_state, blk->status);
877
878 // if we got new data, copy it in
879 if (pkt->isRead()) {
880 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
881 }
882
883 blk->whenReady = pkt->finishTime;
884
885 return blk;
886 }
887
888
889 /////////////////////////////////////////////////////
890 //
891 // Snoop path: requests coming in from the memory side
892 //
893 /////////////////////////////////////////////////////
894
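// Send a timing-mode snoop response carrying the supplied block data back
// toward memory, asserting the shared line unless the snoop is an
// invalidation.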
895 template<class TagStore>
896 void
897 Cache<TagStore>::doTimingSupplyResponse(PacketPtr req_pkt,
898 uint8_t *blk_data,
899 bool already_copied)
900 {
901 // timing-mode snoop responses require a new packet, unless we
902 // already made a copy...
903 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt, true);
904 if (!req_pkt->isInvalidate()) {
905 // note that we're ignoring the shared flag on req_pkt... it's
906 // basically irrelevant, as we'll always assert shared unless
907 // it's an exclusive request, in which case the shared line
908 // should never be asserted.
909 pkt->assertShared();
910 }
911 pkt->allocate();
912 pkt->makeTimingResponse();
913 pkt->setDataFromBlock(blk_data, blkSize);
914 memSidePort->respond(pkt, curTick + hitLatency);
915 }
916
917 template<class TagStore>
918 void
919 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
920 bool is_timing, bool is_deferred)
921 {
922 assert(pkt->isRequest());
923
924 // first propagate snoop upward to see if anyone above us wants to
925 // handle it. save & restore packet src since it will get
926 // rewritten to be relative to cpu-side bus (if any)
927 bool alreadySupplied = pkt->memInhibitAsserted();
928 bool upperSupply = false;
929 if (is_timing) {
930 Packet *snoopPkt = new Packet(pkt, true); // clear flags
931 snoopPkt->setExpressSnoop();
932 if (is_deferred) {
933 snoopPkt->setDeferredSnoop();
934 }
935 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
936 cpuSidePort->sendTiming(snoopPkt);
937 if (snoopPkt->memInhibitAsserted()) {
938 // cache-to-cache response from some upper cache
939 assert(!alreadySupplied);
940 pkt->assertMemInhibit();
941 } else {
942 delete snoopPkt->senderState;
943 }
944 if (snoopPkt->sharedAsserted()) {
945 pkt->assertShared();
946 }
947 delete snoopPkt;
948 } else {
949 int origSrc = pkt->getSrc();
950 cpuSidePort->sendAtomic(pkt);
951 if (!alreadySupplied && pkt->memInhibitAsserted()) {
952 // cache-to-cache response from some upper cache:
953 // forward response to original requester
954 assert(pkt->isResponse());
955 }
956 pkt->setSrc(origSrc);
957 }
958
959 if (!blk || !blk->isValid()) {
960 return;
961 }
962
963 // we may end up modifying both the block state and the packet (if
964 // we respond in atomic mode), so just figure out what to do now
965 // and then do it later
966 bool supply = blk->isDirty() && pkt->isRead() && !upperSupply;
967 bool invalidate = pkt->isInvalidate();
968
969 if (pkt->isRead() && !pkt->isInvalidate()) {
970 assert(!pkt->needsExclusive());
971 pkt->assertShared();
972 int bits_to_clear = BlkWritable;
973 const bool haveOwnershipState = true; // for now
974 if (!haveOwnershipState) {
975 // if we don't support pure ownership (dirty && !writable),
976 // have to clear dirty bit here, assume memory snarfs data
977 // on cache-to-cache xfer
978 bits_to_clear |= BlkDirty;
979 }
980 blk->status &= ~bits_to_clear;
981 }
982
983 if (supply) {
984 assert(!pkt->memInhibitAsserted());
985 pkt->assertMemInhibit();
986 if (is_timing) {
987 doTimingSupplyResponse(pkt, blk->data, is_deferred);
988 } else {
989 pkt->makeAtomicResponse();
990 pkt->setDataFromBlock(blk->data, blkSize);
991 }
992 }
993
994 // Do this last in case it deallocates block data or something
995 // like that
996 if (invalidate) {
997 tags->invalidateBlk(blk);
998 }
999
1000 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1001 pkt->cmdString(), blockAlign(pkt->getAddr()),
1002 supply ? "supplying data, " : "", blk->status);
1003 }
1004
1005
1006 template<class TagStore>
1007 void
1008 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1009 {
1010 if (pkt->req->isUncacheable()) {
1011 //Can't get a hit on an uncacheable address
1012 //Revisit this for multi level coherence
1013 return;
1014 }
1015
1016 BlkType *blk = tags->findBlock(pkt->getAddr());
1017
1018 Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
1019 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1020 // better not be snooping a request that conflicts with something
1021 // we have outstanding...
1022 if (mshr && mshr->handleSnoop(pkt, order++)) {
1023 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1024 blk_addr);
1025 if (mshr->getNumTargets() > numTarget)
1026 warn("allocating bonus target for snoop"); //handle later
1027 return;
1028 }
1029
1030 //We also need to check the writeback buffers and handle those
1031 std::vector<MSHR *> writebacks;
1032 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1033 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1034 pkt->getAddr());
1035
1036 //Look through writebacks for any non-uncacheable writes and use that
1037 for (int i=0; i<writebacks.size(); i++) {
1038 mshr = writebacks[i];
1039 assert(!mshr->isUncacheable());
1040 assert(mshr->getNumTargets() == 1);
1041 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1042 assert(wb_pkt->cmd == MemCmd::Writeback);
1043
1044 if (pkt->isRead()) {
1045 assert(!pkt->memInhibitAsserted());
1046 pkt->assertMemInhibit();
1047 if (!pkt->needsExclusive()) {
1048 pkt->assertShared();
1049 } else {
1050 // if we're not asserting the shared line, we need to
1051 // invalidate our copy. we'll do that below as long as
1052 // the packet's invalidate flag is set...
1053 assert(pkt->isInvalidate());
1054 }
1055 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(), false);
1056 }
1057
1058 if (pkt->isInvalidate()) {
1059 // Invalidation trumps our writeback... discard here
1060 markInService(mshr);
1061 }
1062 return;
1063 }
1064 }
1065
1066 handleSnoop(pkt, blk, true, false);
1067 }
1068
1069
1070 template<class TagStore>
1071 Tick
1072 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1073 {
1074 if (pkt->req->isUncacheable()) {
1075 // Can't get a hit on an uncacheable address
1076 // Revisit this for multi level coherence
1077 return hitLatency;
1078 }
1079
1080 BlkType *blk = tags->findBlock(pkt->getAddr());
1081 handleSnoop(pkt, blk, false, false);
1082 return hitLatency;
1083 }
1084
1085
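// Choose the next MSHR to service, arbitrating between the miss queue and
// the write buffer; if both are empty, try to start a prefetch instead.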
1086 template<class TagStore>
1087 MSHR *
1088 Cache<TagStore>::getNextMSHR()
1089 {
1090 // Check both MSHR queue and write buffer for potential requests
1091 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1092 MSHR *write_mshr = writeBuffer.getNextMSHR();
1093
1094 // Now figure out which one to send... some cases are easy
1095 if (miss_mshr && !write_mshr) {
1096 return miss_mshr;
1097 }
1098 if (write_mshr && !miss_mshr) {
1099 return write_mshr;
1100 }
1101
1102 if (miss_mshr && write_mshr) {
1103 // We have one of each... normally we favor the miss request
1104 // unless the write buffer is full
1105 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1106 // Write buffer is full, so we'd like to issue a write;
1107 // need to search MSHR queue for conflicting earlier miss.
1108 MSHR *conflict_mshr =
1109 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1110
1111 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1112 // Service misses in order until conflict is cleared.
1113 return conflict_mshr;
1114 }
1115
1116 // No conflicts; issue write
1117 return write_mshr;
1118 }
1119
1120 // Write buffer isn't full, but need to check it for
1121 // conflicting earlier writeback
1122 MSHR *conflict_mshr =
1123 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1124 if (conflict_mshr) {
1125 // not sure why we don't check order here... it was in the
1126 // original code but commented out.
1127
1128 // The only way this happens is if we are
1129 // doing a write and we didn't have permissions
1130 // then subsequently saw a writeback (owned got evicted)
1131 // We need to make sure to perform the writeback first
1132 // To preserve the dirty data, then we can issue the write
1133
1134 // should we return write_mshr here instead? I.e. do we
1135 // have to flush writes in order? I don't think so... not
1136 // for Alpha anyway. Maybe for x86?
1137 return conflict_mshr;
1138 }
1139
1140 // No conflicts; issue read
1141 return miss_mshr;
1142 }
1143
1144 // fall through... no pending requests. Try a prefetch.
1145 assert(!miss_mshr && !write_mshr);
1146 if (!mshrQueue.isFull()) {
1147 // If we have a miss queue slot, we can try a prefetch
1148 PacketPtr pkt = prefetcher->getPacket();
1149 if (pkt) {
1150 // Update statistic on number of prefetches issued
1151 // (hwpf_mshr_misses)
1152 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
1153 // Don't request bus, since we already have it
1154 return allocateMissBuffer(pkt, curTick, false);
1155 }
1156 }
1157
1158 return NULL;
1159 }
1160
1161
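// Build the next packet to send on the memory-side bus from the next ready
// MSHR; returns NULL if there is nothing ready to send.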
1162 template<class TagStore>
1163 PacketPtr
1164 Cache<TagStore>::getTimingPacket()
1165 {
1166 MSHR *mshr = getNextMSHR();
1167
1168 if (mshr == NULL) {
1169 return NULL;
1170 }
1171
1172 // use request from 1st target
1173 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1174 PacketPtr pkt = NULL;
1175
1176 if (mshr->isSimpleForward()) {
1177 // no response expected, just forward packet as it is
1178 assert(tags->findBlock(mshr->addr) == NULL);
1179 pkt = tgt_pkt;
1180 } else {
1181 BlkType *blk = tags->findBlock(mshr->addr);
1182 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1183
1184 mshr->isCacheFill = (pkt != NULL);
1185
1186 if (pkt == NULL) {
1187 // not a cache block request, but a response is expected
1188 assert(!mshr->isSimpleForward());
1189 // make copy of current packet to forward, keep current
1190 // copy for response handling
1191 pkt = new Packet(tgt_pkt);
1192 pkt->allocate();
1193 if (pkt->isWrite()) {
1194 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1195 }
1196 }
1197 }
1198
1199 assert(pkt != NULL);
1200 pkt->senderState = mshr;
1201 return pkt;
1202 }
1203
1204
1205 ///////////////
1206 //
1207 // CpuSidePort
1208 //
1209 ///////////////
1210
1211 template<class TagStore>
1212 void
1213 Cache<TagStore>::CpuSidePort::
1214 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1215 {
1216 // CPU side port doesn't snoop; it's a target only.
1217 bool dummy;
1218 otherPort->getPeerAddressRanges(resp, dummy);
1219 snoop = false;
1220 }
1221
1222
1223 template<class TagStore>
1224 bool
1225 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1226 {
1227 // illegal to block responses... can lead to deadlock
1228 if (pkt->isRequest() && blocked) {
1229 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1230 mustSendRetry = true;
1231 return false;
1232 }
1233
1234 myCache()->timingAccess(pkt);
1235 return true;
1236 }
1237
1238
1239 template<class TagStore>
1240 Tick
1241 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1242 {
1243 return myCache()->atomicAccess(pkt);
1244 }
1245
1246
1247 template<class TagStore>
1248 void
1249 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1250 {
1251 checkFunctional(pkt);
1252 if (!pkt->isResponse())
1253 myCache()->functionalAccess(pkt, cache->memSidePort);
1254 }
1255
1256
1257 template<class TagStore>
1258 Cache<TagStore>::
1259 CpuSidePort::CpuSidePort(const std::string &_name,
1260 Cache<TagStore> *_cache)
1261 : BaseCache::CachePort(_name, _cache)
1262 {
1263 }
1264
1265 ///////////////
1266 //
1267 // MemSidePort
1268 //
1269 ///////////////
1270
1271 template<class TagStore>
1272 void
1273 Cache<TagStore>::MemSidePort::
1274 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1275 {
1276 otherPort->getPeerAddressRanges(resp, snoop);
1277 // Memory-side port always snoops, so unconditionally set flag for
1278 // caller.
1279 snoop = true;
1280 }
1281
1282
1283 template<class TagStore>
1284 bool
1285 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1286 {
1287 // this needs to be fixed so that the cache updates the mshr and sends the
1288 // packet back out on the link, but it probably won't happen, so until this
1289 // gets fixed, just panic when it does
1290 if (pkt->wasNacked())
1291 panic("Need to implement cache resending nacked packets!\n");
1292
1293 if (pkt->isRequest() && blocked) {
1294 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1295 mustSendRetry = true;
1296 return false;
1297 }
1298
1299 if (pkt->isResponse()) {
1300 myCache()->handleResponse(pkt);
1301 } else {
1302 myCache()->snoopTiming(pkt);
1303 }
1304 return true;
1305 }
1306
1307
1308 template<class TagStore>
1309 Tick
1310 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1311 {
1312 // in atomic mode, responses go back to the sender via the
1313 // function return from sendAtomic(), not via a separate
1314 // sendAtomic() from the responder. Thus we should never see a
1315 // response packet in recvAtomic() (anywhere, not just here).
1316 assert(!pkt->isResponse());
1317 return myCache()->snoopAtomic(pkt);
1318 }
1319
1320
1321 template<class TagStore>
1322 void
1323 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1324 {
1325 checkFunctional(pkt);
1326 if (!pkt->isResponse())
1327 myCache()->functionalAccess(pkt, cache->cpuSidePort);
1328 }
1329
1330
1331
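// Try to send the next packet on the memory side: queued responses take
// precedence over new requests and writebacks obtained from getTimingPacket().
// Reschedules itself for the next ready time or signals drain completion.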
1332 template<class TagStore>
1333 void
1334 Cache<TagStore>::MemSidePort::sendPacket()
1335 {
1336 // if we have responses that are ready, they take precedence
1337 if (deferredPacketReady()) {
1338 bool success = sendTiming(transmitList.front().pkt);
1339
1340 if (success) {
1341 //send successful, remove packet
1342 transmitList.pop_front();
1343 }
1344
1345 waitingOnRetry = !success;
1346 } else {
1347 // check for non-response packets (requests & writebacks)
1348 PacketPtr pkt = myCache()->getTimingPacket();
1349 if (pkt == NULL) {
1350 // can happen if e.g. we attempt a writeback and fail, but
1351 // before the retry, the writeback is eliminated because
1352 // we snoop another cache's ReadEx.
1353 waitingOnRetry = false;
1354 } else {
1355 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1356
1357 bool success = sendTiming(pkt);
1358 DPRINTF(CachePort,
1359 "Address %x was %s in sending the timing request\n",
1360 pkt->getAddr(), success ? "successful" : "unsuccessful");
1361
1362 waitingOnRetry = !success;
1363 if (waitingOnRetry) {
1364 DPRINTF(CachePort, "now waiting on a retry\n");
1365 if (!mshr->isSimpleForward()) {
1366 delete pkt;
1367 }
1368 } else {
1369 myCache()->markInService(mshr);
1370 }
1371 }
1372 }
1373
1374
1375 // tried to send packet... if it was successful (no retry), see if
1376 // we need to rerequest bus or not
1377 if (!waitingOnRetry) {
1378 Tick nextReady = std::min(deferredPacketReadyTime(),
1379 myCache()->nextMSHRReadyTime());
1380 // @TODO: need to factor in prefetch requests here somehow
1381 if (nextReady != MaxTick) {
1382 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1383 sendEvent->schedule(std::max(nextReady, curTick + 1));
1384 } else {
1385 // no more to send right now: if we're draining, we may be done
1386 if (drainEvent) {
1387 drainEvent->process();
1388 drainEvent = NULL;
1389 }
1390 }
1391 }
1392 }
1393
1394 template<class TagStore>
1395 void
1396 Cache<TagStore>::MemSidePort::recvRetry()
1397 {
1398 assert(waitingOnRetry);
1399 sendPacket();
1400 }
1401
1402
1403 template<class TagStore>
1404 void
1405 Cache<TagStore>::MemSidePort::processSendEvent()
1406 {
1407 assert(!waitingOnRetry);
1408 sendPacket();
1409 }
1410
1411
1412 template<class TagStore>
1413 Cache<TagStore>::
1414 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache)
1415 : BaseCache::CachePort(_name, _cache)
1416 {
1417 // override default send event from SimpleTimingPort
1418 delete sendEvent;
1419 sendEvent = new SendEvent(this);
1420 }