Make L2+ caches allocate new block for writeback misses
[gem5.git] / src / mem / physical.hh
1 /*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ron Dreslinski
29 */
30
/**
 * @file
 * Declaration of PhysicalMemory, a functional model of a contiguous
 * block of physical memory (RAM).
 */
33
34 #ifndef __PHYSICAL_MEMORY_HH__
35 #define __PHYSICAL_MEMORY_HH__
36
#include <list>
#include <map>
#include <string>
#include <vector>

#include "base/range.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/tport.hh"
#include "params/PhysicalMemory.hh"
#include "sim/eventq.hh"
46
47 //
48 // Functional model for a contiguous block of physical memory. (i.e. RAM)
49 //
50 class PhysicalMemory : public MemObject
51 {
52 class MemoryPort : public SimpleTimingPort
53 {
54 PhysicalMemory *memory;
55
56 public:
57
58 MemoryPort(const std::string &_name, PhysicalMemory *_memory);
59
60 protected:
61
62 virtual Tick recvAtomic(PacketPtr pkt);
63
64 virtual void recvFunctional(PacketPtr pkt);
65
66 virtual void recvStatusChange(Status status);
67
68 virtual void getDeviceAddressRanges(AddrRangeList &resp,
69 bool &snoop);
70
71 virtual int deviceBlockSize();
72 };
73
74 int numPorts;
75
76
77 private:
78 // prevent copying of a MainMemory object
79 PhysicalMemory(const PhysicalMemory &specmem);
80 const PhysicalMemory &operator=(const PhysicalMemory &specmem);
81
82 protected:
83
84 class LockedAddr {
85 public:
86 // on alpha, minimum LL/SC granularity is 16 bytes, so lower
87 // bits need to masked off.
88 static const Addr Addr_Mask = 0xf;
89
90 static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
91
92 Addr addr; // locked address
93 int cpuNum; // locking CPU
94 int threadNum; // locking thread ID within CPU
95
96 // check for matching execution context
97 bool matchesContext(Request *req)
98 {
99 return (cpuNum == req->getCpuNum() &&
100 threadNum == req->getThreadNum());
101 }
102
103 LockedAddr(Request *req)
104 : addr(mask(req->getPaddr())),
105 cpuNum(req->getCpuNum()),
106 threadNum(req->getThreadNum())
107 {
108 }
109 };
110
111 std::list<LockedAddr> lockedAddrList;
112
113 // helper function for checkLockedAddrs(): we really want to
114 // inline a quick check for an empty locked addr list (hopefully
115 // the common case), and do the full list search (if necessary) in
116 // this out-of-line function
117 bool checkLockedAddrList(PacketPtr pkt);
118
119 // Record the address of a load-locked operation so that we can
120 // clear the execution context's lock flag if a matching store is
121 // performed
122 void trackLoadLocked(PacketPtr pkt);
123
124 // Compare a store address with any locked addresses so we can
125 // clear the lock flag appropriately. Return value set to 'false'
126 // if store operation should be suppressed (because it was a
127 // conditional store and the address was no longer locked by the
128 // requesting execution context), 'true' otherwise. Note that
129 // this method must be called on *all* stores since even
130 // non-conditional stores must clear any matching lock addresses.
131 bool writeOK(PacketPtr pkt) {
132 Request *req = pkt->req;
133 if (lockedAddrList.empty()) {
134 // no locked addrs: nothing to check, store_conditional fails
135 bool isLocked = pkt->isLocked();
136 if (isLocked) {
137 req->setExtraData(0);
138 }
139 return !isLocked; // only do write if not an sc
140 } else {
141 // iterate over list...
142 return checkLockedAddrList(pkt);
143 }
144 }
145
146 uint8_t *pmemAddr;
147 int pagePtr;
148 Tick lat;
149 std::vector<MemoryPort*> ports;
150 typedef std::vector<MemoryPort*>::iterator PortIterator;
151
152 uint64_t cachedSize;
153 uint64_t cachedStart;
154 public:
155 Addr new_page();
156 uint64_t size() { return cachedSize; }
157 uint64_t start() { return cachedStart; }
158
159 public:
160 typedef PhysicalMemoryParams Params;
161 PhysicalMemory(const Params *p);
162 virtual ~PhysicalMemory();
163
164 const Params *
165 params() const
166 {
167 return dynamic_cast<const Params *>(_params);
168 }
169
170 public:
171 int deviceBlockSize();
172 void getAddressRanges(AddrRangeList &resp, bool &snoop);
173 virtual Port *getPort(const std::string &if_name, int idx = -1);
174 void virtual init();
175 unsigned int drain(Event *de);
176
177 protected:
178 Tick doAtomicAccess(PacketPtr pkt);
179 void doFunctionalAccess(PacketPtr pkt);
180 virtual Tick calculateLatency(PacketPtr pkt);
181 void recvStatusChange(Port::Status status);
182
183 public:
184 virtual void serialize(std::ostream &os);
185 virtual void unserialize(Checkpoint *cp, const std::string &section);
186
187 };
188
189 #endif //__PHYSICAL_MEMORY_HH__