From: Nilay Vaish
Date: Mon, 1 Sep 2014 21:55:40 +0000 (-0500)
Subject: ruby: move files from ruby/system to ruby/structures
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=82d136285dac52a97384961a814d5a0dda4a6482;p=gem5.git

ruby: move files from ruby/system to ruby/structures

The directory ruby/system is crowded and unorganized. Hence, the files that hold actual physical structures are being moved to the directory ruby/structures. This includes Cache Memory, Directory Memory, Memory Controller, Wire Buffer, TBE Table, Perfect Cache Memory, Timer Table, and Banked Array. The directory ruby/system keeps the glue code that holds these structures together.

--HG--
rename : src/mem/ruby/system/MachineID.hh => src/mem/ruby/common/MachineID.hh
rename : src/mem/ruby/buffers/MessageBuffer.cc => src/mem/ruby/network/MessageBuffer.cc
rename : src/mem/ruby/buffers/MessageBuffer.hh => src/mem/ruby/network/MessageBuffer.hh
rename : src/mem/ruby/buffers/MessageBufferNode.cc => src/mem/ruby/network/MessageBufferNode.cc
rename : src/mem/ruby/buffers/MessageBufferNode.hh => src/mem/ruby/network/MessageBufferNode.hh
rename : src/mem/ruby/system/AbstractReplacementPolicy.hh => src/mem/ruby/structures/AbstractReplacementPolicy.hh
rename : src/mem/ruby/system/BankedArray.cc => src/mem/ruby/structures/BankedArray.cc
rename : src/mem/ruby/system/BankedArray.hh => src/mem/ruby/structures/BankedArray.hh
rename : src/mem/ruby/system/Cache.py => src/mem/ruby/structures/Cache.py
rename : src/mem/ruby/system/CacheMemory.cc => src/mem/ruby/structures/CacheMemory.cc
rename : src/mem/ruby/system/CacheMemory.hh => src/mem/ruby/structures/CacheMemory.hh
rename : src/mem/ruby/system/DirectoryMemory.cc => src/mem/ruby/structures/DirectoryMemory.cc
rename : src/mem/ruby/system/DirectoryMemory.hh => src/mem/ruby/structures/DirectoryMemory.hh
rename : src/mem/ruby/system/DirectoryMemory.py => src/mem/ruby/structures/DirectoryMemory.py
rename : src/mem/ruby/system/LRUPolicy.hh => src/mem/ruby/structures/LRUPolicy.hh
rename : src/mem/ruby/system/MemoryControl.cc => src/mem/ruby/structures/MemoryControl.cc
rename : src/mem/ruby/system/MemoryControl.hh => src/mem/ruby/structures/MemoryControl.hh
rename : src/mem/ruby/system/MemoryControl.py => src/mem/ruby/structures/MemoryControl.py
rename : src/mem/ruby/system/MemoryNode.cc => src/mem/ruby/structures/MemoryNode.cc
rename : src/mem/ruby/system/MemoryNode.hh => src/mem/ruby/structures/MemoryNode.hh
rename : src/mem/ruby/system/MemoryVector.hh => src/mem/ruby/structures/MemoryVector.hh
rename : src/mem/ruby/system/PerfectCacheMemory.hh => src/mem/ruby/structures/PerfectCacheMemory.hh
rename : src/mem/ruby/system/PersistentTable.cc => src/mem/ruby/structures/PersistentTable.cc
rename : src/mem/ruby/system/PersistentTable.hh => src/mem/ruby/structures/PersistentTable.hh
rename : src/mem/ruby/system/PseudoLRUPolicy.hh => src/mem/ruby/structures/PseudoLRUPolicy.hh
rename : src/mem/ruby/system/RubyMemoryControl.cc => src/mem/ruby/structures/RubyMemoryControl.cc
rename : src/mem/ruby/system/RubyMemoryControl.hh => src/mem/ruby/structures/RubyMemoryControl.hh
rename : src/mem/ruby/system/RubyMemoryControl.py => src/mem/ruby/structures/RubyMemoryControl.py
rename : src/mem/ruby/system/SparseMemory.cc => src/mem/ruby/structures/SparseMemory.cc
rename : src/mem/ruby/system/SparseMemory.hh => src/mem/ruby/structures/SparseMemory.hh
rename : src/mem/ruby/system/TBETable.hh => src/mem/ruby/structures/TBETable.hh
rename : src/mem/ruby/system/TimerTable.cc =>
src/mem/ruby/structures/TimerTable.cc rename : src/mem/ruby/system/TimerTable.hh => src/mem/ruby/structures/TimerTable.hh rename : src/mem/ruby/system/WireBuffer.cc => src/mem/ruby/structures/WireBuffer.cc rename : src/mem/ruby/system/WireBuffer.hh => src/mem/ruby/structures/WireBuffer.hh rename : src/mem/ruby/system/WireBuffer.py => src/mem/ruby/structures/WireBuffer.py rename : src/mem/ruby/recorder/CacheRecorder.cc => src/mem/ruby/system/CacheRecorder.cc rename : src/mem/ruby/recorder/CacheRecorder.hh => src/mem/ruby/system/CacheRecorder.hh --- diff --git a/src/mem/ruby/SConscript b/src/mem/ruby/SConscript index 2072470ac..3029c3297 100644 --- a/src/mem/ruby/SConscript +++ b/src/mem/ruby/SConscript @@ -118,21 +118,21 @@ MakeInclude('slicc_interface/NetworkMessage.hh') MakeInclude('slicc_interface/RubyRequest.hh') # External types -MakeInclude('buffers/MessageBuffer.hh') MakeInclude('common/Address.hh') MakeInclude('common/DataBlock.hh') +MakeInclude('common/MachineID.hh') MakeInclude('common/NetDest.hh') MakeInclude('common/Set.hh') MakeInclude('filters/GenericBloomFilter.hh') +MakeInclude('network/MessageBuffer.hh') MakeInclude('structures/Prefetcher.hh') -MakeInclude('system/CacheMemory.hh') +MakeInclude('structures/CacheMemory.hh') MakeInclude('system/DMASequencer.hh') -MakeInclude('system/DirectoryMemory.hh') -MakeInclude('system/MachineID.hh') -MakeInclude('system/MemoryControl.hh') -MakeInclude('system/WireBuffer.hh') -MakeInclude('system/PerfectCacheMemory.hh') -MakeInclude('system/PersistentTable.hh') +MakeInclude('structures/DirectoryMemory.hh') +MakeInclude('structures/MemoryControl.hh') +MakeInclude('structures/WireBuffer.hh') +MakeInclude('structures/PerfectCacheMemory.hh') +MakeInclude('structures/PersistentTable.hh') MakeInclude('system/Sequencer.hh') -MakeInclude('system/TBETable.hh') -MakeInclude('system/TimerTable.hh') +MakeInclude('structures/TBETable.hh') +MakeInclude('structures/TimerTable.hh') diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc deleted file mode 100644 index b63b07976..000000000 --- a/src/mem/ruby/buffers/MessageBuffer.cc +++ /dev/null @@ -1,422 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include - -#include "base/cprintf.hh" -#include "base/misc.hh" -#include "base/stl_helpers.hh" -#include "debug/RubyQueue.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" -#include "mem/ruby/system/System.hh" - -using namespace std; -using m5::stl_helpers::operator<<; - -MessageBuffer::MessageBuffer(const string &name) - : m_time_last_time_size_checked(0), m_time_last_time_enqueue(0), - m_time_last_time_pop(0), m_last_arrival_time(0) -{ - m_msg_counter = 0; - m_consumer = NULL; - m_sender = NULL; - m_receiver = NULL; - - m_ordering_set = false; - m_strict_fifo = true; - m_max_size = 0; - m_randomization = true; - m_size_last_time_size_checked = 0; - m_size_at_cycle_start = 0; - m_msgs_this_cycle = 0; - m_not_avail_count = 0; - m_priority_rank = 0; - m_name = name; - - m_stall_msg_map.clear(); - m_input_link_id = 0; - m_vnet_id = 0; -} - -unsigned int -MessageBuffer::getSize() -{ - if (m_time_last_time_size_checked != m_receiver->curCycle()) { - m_time_last_time_size_checked = m_receiver->curCycle(); - m_size_last_time_size_checked = m_prio_heap.size(); - } - - return m_size_last_time_size_checked; -} - -bool -MessageBuffer::areNSlotsAvailable(unsigned int n) -{ - - // fast path when message buffers have infinite size - if (m_max_size == 0) { - return true; - } - - // determine the correct size for the current cycle - // pop operations shouldn't effect the network's visible size - // until next cycle, but enqueue operations effect the visible - // size immediately - unsigned int current_size = 0; - - if (m_time_last_time_pop < m_sender->clockEdge()) { - // no pops this cycle - heap size is correct - current_size = m_prio_heap.size(); - } else { - if (m_time_last_time_enqueue < m_sender->curCycle()) { - // no enqueues this cycle - m_size_at_cycle_start is correct - current_size = m_size_at_cycle_start; - } else { - // both pops and enqueues occured this cycle - add new - // enqueued msgs to m_size_at_cycle_start - current_size = m_size_at_cycle_start + m_msgs_this_cycle; - } - } - - // now compare the new size with our max size - if (current_size + n <= m_max_size) { - return true; - } else { - DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, " - "m_max_size: %d\n", - n, current_size, m_prio_heap.size(), m_max_size); - m_not_avail_count++; - return false; - } -} - -const Message* -MessageBuffer::peek() const -{ - DPRINTF(RubyQueue, "Peeking at head of queue.\n"); - assert(isReady()); - - const Message* msg_ptr = m_prio_heap.front().m_msgptr.get(); - assert(msg_ptr); - - DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr)); - return msg_ptr; -} - -// FIXME - move me somewhere else -Cycles -random_time() -{ - Cycles time(1); - time += Cycles(random() & 0x3); // [0...3] - if ((random() & 0x7) == 0) { // 1 in 8 chance - time += Cycles(100 + (random() % 0xf)); // 100 + [1...15] - } - return time; -} - -void -MessageBuffer::enqueue(MsgPtr message, Cycles delta) -{ - m_msg_counter++; - - // record current time incase we have a pop that also adjusts my 
size - if (m_time_last_time_enqueue < m_sender->curCycle()) { - m_msgs_this_cycle = 0; // first msg this cycle - m_time_last_time_enqueue = m_sender->curCycle(); - } - m_msgs_this_cycle++; - - assert(m_ordering_set); - - // Calculate the arrival time of the message, that is, the first - // cycle the message can be dequeued. - assert(delta > 0); - Tick current_time = m_sender->clockEdge(); - Tick arrival_time = 0; - - if (!RubySystem::getRandomization() || !m_randomization) { - // No randomization - arrival_time = current_time + delta * m_sender->clockPeriod(); - } else { - // Randomization - ignore delta - if (m_strict_fifo) { - if (m_last_arrival_time < current_time) { - m_last_arrival_time = current_time; - } - arrival_time = m_last_arrival_time + - random_time() * m_sender->clockPeriod(); - } else { - arrival_time = current_time + - random_time() * m_sender->clockPeriod(); - } - } - - // Check the arrival time - assert(arrival_time > current_time); - if (m_strict_fifo) { - if (arrival_time < m_last_arrival_time) { - panic("FIFO ordering violated: %s name: %s current time: %d " - "delta: %d arrival_time: %d last arrival_time: %d\n", - *this, m_name, current_time, - delta * m_sender->clockPeriod(), - arrival_time, m_last_arrival_time); - } - } - - // If running a cache trace, don't worry about the last arrival checks - if (!g_system_ptr->m_warmup_enabled) { - m_last_arrival_time = arrival_time; - } - - // compute the delay cycles and set enqueue time - Message* msg_ptr = message.get(); - assert(msg_ptr != NULL); - - assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() && - "ensure we aren't dequeued early"); - - msg_ptr->updateDelayedTicks(m_sender->clockEdge()); - msg_ptr->setLastEnqueueTime(arrival_time); - - // Insert the message into the priority heap - MessageBufferNode thisNode(arrival_time, m_msg_counter, message); - m_prio_heap.push_back(thisNode); - push_heap(m_prio_heap.begin(), m_prio_heap.end(), - greater()); - - DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n", - arrival_time, *(message.get())); - - // Schedule the wakeup - assert(m_consumer != NULL); - m_consumer->scheduleEventAbsolute(arrival_time); - m_consumer->storeEventInfo(m_vnet_id); -} - -Cycles -MessageBuffer::dequeue() -{ - DPRINTF(RubyQueue, "Popping\n"); - assert(isReady()); - - // get MsgPtr of the message about to be dequeued - MsgPtr message = m_prio_heap.front().m_msgptr; - - // get the delay cycles - message->updateDelayedTicks(m_receiver->clockEdge()); - Cycles delayCycles = - m_receiver->ticksToCycles(message->getDelayedTicks()); - - // record previous size and time so the current buffer size isn't - // adjusted until next cycle - if (m_time_last_time_pop < m_receiver->clockEdge()) { - m_size_at_cycle_start = m_prio_heap.size(); - m_time_last_time_pop = m_receiver->clockEdge(); - } - - pop_heap(m_prio_heap.begin(), m_prio_heap.end(), - greater()); - m_prio_heap.pop_back(); - - return delayCycles; -} - -void -MessageBuffer::clear() -{ - m_prio_heap.clear(); - - m_msg_counter = 0; - m_time_last_time_enqueue = Cycles(0); - m_time_last_time_pop = 0; - m_size_at_cycle_start = 0; - m_msgs_this_cycle = 0; -} - -void -MessageBuffer::recycle() -{ - DPRINTF(RubyQueue, "Recycling.\n"); - assert(isReady()); - MessageBufferNode node = m_prio_heap.front(); - pop_heap(m_prio_heap.begin(), m_prio_heap.end(), - greater()); - - node.m_time = m_receiver->clockEdge(m_recycle_latency); - m_prio_heap.back() = node; - push_heap(m_prio_heap.begin(), m_prio_heap.end(), - greater()); - m_consumer-> - 
scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency)); -} - -void -MessageBuffer::reanalyzeList(list <, Tick nextTick) -{ - while(!lt.empty()) { - m_msg_counter++; - MessageBufferNode msgNode(nextTick, m_msg_counter, lt.front()); - - m_prio_heap.push_back(msgNode); - push_heap(m_prio_heap.begin(), m_prio_heap.end(), - greater()); - - m_consumer->scheduleEventAbsolute(nextTick); - lt.pop_front(); - } -} - -void -MessageBuffer::reanalyzeMessages(const Address& addr) -{ - DPRINTF(RubyQueue, "ReanalyzeMessages\n"); - assert(m_stall_msg_map.count(addr) > 0); - Tick nextTick = m_receiver->clockEdge(Cycles(1)); - - // - // Put all stalled messages associated with this address back on the - // prio heap - // - reanalyzeList(m_stall_msg_map[addr], nextTick); - m_stall_msg_map.erase(addr); -} - -void -MessageBuffer::reanalyzeAllMessages() -{ - DPRINTF(RubyQueue, "ReanalyzeAllMessages\n"); - Tick nextTick = m_receiver->clockEdge(Cycles(1)); - - // - // Put all stalled messages associated with this address back on the - // prio heap - // - for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin(); - map_iter != m_stall_msg_map.end(); ++map_iter) { - reanalyzeList(map_iter->second, nextTick); - } - m_stall_msg_map.clear(); -} - -void -MessageBuffer::stallMessage(const Address& addr) -{ - DPRINTF(RubyQueue, "Stalling due to %s\n", addr); - assert(isReady()); - assert(addr.getOffset() == 0); - MsgPtr message = m_prio_heap.front().m_msgptr; - - dequeue(); - - // - // Note: no event is scheduled to analyze the map at a later time. - // Instead the controller is responsible to call reanalyzeMessages when - // these addresses change state. - // - (m_stall_msg_map[addr]).push_back(message); -} - -void -MessageBuffer::print(ostream& out) const -{ - ccprintf(out, "[MessageBuffer: "); - if (m_consumer != NULL) { - ccprintf(out, " consumer-yes "); - } - - vector copy(m_prio_heap); - sort_heap(copy.begin(), copy.end(), greater()); - ccprintf(out, "%s] %s", copy, m_name); -} - -bool -MessageBuffer::isReady() const -{ - return ((m_prio_heap.size() > 0) && - (m_prio_heap.front().m_time <= m_receiver->clockEdge())); -} - -bool -MessageBuffer::functionalRead(Packet *pkt) -{ - // Check the priority heap and read any messages that may - // correspond to the address in the packet. - for (unsigned int i = 0; i < m_prio_heap.size(); ++i) { - Message *msg = m_prio_heap[i].m_msgptr.get(); - if (msg->functionalRead(pkt)) return true; - } - - // Read the messages in the stall queue that correspond - // to the address in the packet. - for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin(); - map_iter != m_stall_msg_map.end(); - ++map_iter) { - - for (std::list::iterator it = (map_iter->second).begin(); - it != (map_iter->second).end(); ++it) { - - Message *msg = (*it).get(); - if (msg->functionalRead(pkt)) return true; - } - } - return false; -} - -uint32_t -MessageBuffer::functionalWrite(Packet *pkt) -{ - uint32_t num_functional_writes = 0; - - // Check the priority heap and write any messages that may - // correspond to the address in the packet. - for (unsigned int i = 0; i < m_prio_heap.size(); ++i) { - Message *msg = m_prio_heap[i].m_msgptr.get(); - if (msg->functionalWrite(pkt)) { - num_functional_writes++; - } - } - - // Check the stall queue and write any messages that may - // correspond to the address in the packet. 
- for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin(); - map_iter != m_stall_msg_map.end(); - ++map_iter) { - - for (std::list::iterator it = (map_iter->second).begin(); - it != (map_iter->second).end(); ++it) { - - Message *msg = (*it).get(); - if (msg->functionalWrite(pkt)) { - num_functional_writes++; - } - } - } - - return num_functional_writes; -} diff --git a/src/mem/ruby/buffers/MessageBuffer.hh b/src/mem/ruby/buffers/MessageBuffer.hh deleted file mode 100644 index 3b3a69a3e..000000000 --- a/src/mem/ruby/buffers/MessageBuffer.hh +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Unordered buffer of messages that can be inserted such - * that they can be dequeued after a given delta time has expired. 
- */ - -#ifndef __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__ -#define __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__ - -#include -#include -#include -#include -#include -#include - -#include "mem/packet.hh" -#include "mem/ruby/buffers/MessageBufferNode.hh" -#include "mem/ruby/common/Address.hh" -#include "mem/ruby/common/Consumer.hh" -#include "mem/ruby/slicc_interface/Message.hh" - -class MessageBuffer -{ - public: - MessageBuffer(const std::string &name = ""); - - std::string name() const { return m_name; } - - void setRecycleLatency(Cycles recycle_latency) - { m_recycle_latency = recycle_latency; } - - void reanalyzeMessages(const Address& addr); - void reanalyzeAllMessages(); - void stallMessage(const Address& addr); - - // TRUE if head of queue timestamp <= SystemTime - bool isReady() const; - - void - delayHead() - { - MessageBufferNode node = m_prio_heap.front(); - std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(), - std::greater()); - m_prio_heap.pop_back(); - enqueue(node.m_msgptr, Cycles(1)); - } - - bool areNSlotsAvailable(unsigned int n); - int getPriority() { return m_priority_rank; } - void setPriority(int rank) { m_priority_rank = rank; } - void setConsumer(Consumer* consumer) - { - if (m_consumer != NULL) { - fatal("Trying to connect %s to MessageBuffer %s. \ - \n%s already connected. Check the cntrl_id's.\n", - *consumer, *this, *m_consumer); - } - m_consumer = consumer; - } - - void setSender(ClockedObject* obj) - { - assert(m_sender == NULL || m_sender == obj); - m_sender = obj; - } - - void setReceiver(ClockedObject* obj) - { - assert(m_receiver == NULL || m_receiver == obj); - m_receiver = obj; - } - - void setDescription(const std::string& name) { m_name = name; } - std::string getDescription() { return m_name;} - - Consumer* getConsumer() { return m_consumer; } - - //! Function for extracting the message at the head of the - //! message queue. The function assumes that the queue is nonempty. - const Message* peek() const; - - const MsgPtr& - peekMsgPtr() const - { - assert(isReady()); - return m_prio_heap.front().m_msgptr; - } - - void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); } - void enqueue(MsgPtr message, Cycles delta); - - //! Updates the delay cycles of the message at the head of the queue, - //! removes it from the queue and returns its total delay. - Cycles dequeue(); - - void recycle(); - bool isEmpty() const { return m_prio_heap.size() == 0; } - - void - setOrdering(bool order) - { - m_strict_fifo = order; - m_ordering_set = true; - } - - void resize(unsigned int size) { m_max_size = size; } - unsigned int getSize(); - void setRandomization(bool random_flag) { m_randomization = random_flag; } - - void clear(); - void print(std::ostream& out) const; - void clearStats() { m_not_avail_count = 0; m_msg_counter = 0; } - - void setIncomingLink(int link_id) { m_input_link_id = link_id; } - void setVnet(int net) { m_vnet_id = net; } - - // Function for figuring out if any of the messages in the buffer can - // satisfy the read request for the address in the packet. - // Return value, if true, indicates that the request was fulfilled. - bool functionalRead(Packet *pkt); - - // Function for figuring out if any of the messages in the buffer need - // to be updated with the data from the packet. - // Return value indicates the number of messages that were updated. - // This required for debugging the code. 
- uint32_t functionalWrite(Packet *pkt); - - private: - void reanalyzeList(std::list &, Tick); - - private: - //added by SS - Cycles m_recycle_latency; - - // Data Members (m_ prefix) - //! The two ends of the buffer. - ClockedObject* m_sender; - ClockedObject* m_receiver; - - //! Consumer to signal a wakeup(), can be NULL - Consumer* m_consumer; - std::vector m_prio_heap; - - // use a std::map for the stalled messages as this container is - // sorted and ensures a well-defined iteration order - typedef std::map< Address, std::list > StallMsgMapType; - - StallMsgMapType m_stall_msg_map; - std::string m_name; - - unsigned int m_max_size; - Cycles m_time_last_time_size_checked; - unsigned int m_size_last_time_size_checked; - - // variables used so enqueues appear to happen imediately, while - // pop happen the next cycle - Cycles m_time_last_time_enqueue; - Tick m_time_last_time_pop; - Tick m_last_arrival_time; - - unsigned int m_size_at_cycle_start; - unsigned int m_msgs_this_cycle; - - int m_not_avail_count; // count the # of times I didn't have N - // slots available - uint64 m_msg_counter; - int m_priority_rank; - bool m_strict_fifo; - bool m_ordering_set; - bool m_randomization; - - int m_input_link_id; - int m_vnet_id; -}; - -Cycles random_time(); - -inline std::ostream& -operator<<(std::ostream& out, const MessageBuffer& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -#endif // __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__ diff --git a/src/mem/ruby/buffers/MessageBufferNode.cc b/src/mem/ruby/buffers/MessageBufferNode.cc deleted file mode 100644 index d54d8345e..000000000 --- a/src/mem/ruby/buffers/MessageBufferNode.cc +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "mem/ruby/buffers/MessageBufferNode.hh" - -void -MessageBufferNode::print(std::ostream& out) const -{ - out << "["; - out << m_time << ", "; - out << m_msg_counter << ", "; - out << *m_msgptr << "; "; - out << "]"; -} diff --git a/src/mem/ruby/buffers/MessageBufferNode.hh b/src/mem/ruby/buffers/MessageBufferNode.hh deleted file mode 100644 index 16aec8a1b..000000000 --- a/src/mem/ruby/buffers/MessageBufferNode.hh +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__ -#define __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__ - -#include - -#include "mem/ruby/slicc_interface/Message.hh" - -class MessageBufferNode -{ - public: - MessageBufferNode() - : m_time(0), m_msg_counter(0) - {} - - MessageBufferNode(const Tick time, uint64_t counter, - const MsgPtr& msgptr) - : m_time(time), m_msg_counter(counter), m_msgptr(msgptr) - {} - - void print(std::ostream& out) const; - - public: - Tick m_time; - uint64_t m_msg_counter; // FIXME, should this be a 64-bit value? - MsgPtr m_msgptr; -}; - -inline bool -operator>(const MessageBufferNode& n1, const MessageBufferNode& n2) -{ - if (n1.m_time == n2.m_time) { - assert(n1.m_msg_counter != n2.m_msg_counter); - return n1.m_msg_counter > n2.m_msg_counter; - } else { - return n1.m_time > n2.m_time; - } -} - -inline std::ostream& -operator<<(std::ostream& out, const MessageBufferNode& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -#endif // __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__ diff --git a/src/mem/ruby/buffers/SConscript b/src/mem/ruby/buffers/SConscript deleted file mode 100644 index ff0b5e9bc..000000000 --- a/src/mem/ruby/buffers/SConscript +++ /dev/null @@ -1,37 +0,0 @@ -# -*- mode:python -*- - -# Copyright (c) 2009 The Hewlett-Packard Development Company -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# Authors: Nathan Binkert - -Import('*') - -if env['PROTOCOL'] == 'None': - Return() - -Source('MessageBuffer.cc') -Source('MessageBufferNode.cc') diff --git a/src/mem/ruby/common/MachineID.hh b/src/mem/ruby/common/MachineID.hh new file mode 100644 index 000000000..0ad898959 --- /dev/null +++ b/src/mem/ruby/common/MachineID.hh @@ -0,0 +1,83 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __MEM_RUBY_SYSTEM_MACHINEID_HH__ +#define __MEM_RUBY_SYSTEM_MACHINEID_HH__ + +#include +#include + +#include "base/cprintf.hh" +#include "mem/protocol/MachineType.hh" + +struct MachineID +{ + MachineType type; + //! range: 0 ... number of this machine's components in system - 1 + NodeID num; + + MachineType getType() const { return type; } + NodeID getNum() const { return num; } +}; + +inline std::string +MachineIDToString(MachineID machine) +{ + return csprintf("%s_%d", MachineType_to_string(machine.type), machine.num); +} + +inline bool +operator==(const MachineID & obj1, const MachineID & obj2) +{ + return (obj1.type == obj2.type && obj1.num == obj2.num); +} + +inline bool +operator!=(const MachineID & obj1, const MachineID & obj2) +{ + return (obj1.type != obj2.type || obj1.num != obj2.num); +} + +// Output operator declaration +std::ostream& operator<<(std::ostream& out, const MachineID& obj); + +inline std::ostream& +operator<<(std::ostream& out, const MachineID& obj) +{ + if ((obj.type < MachineType_NUM) && (obj.type >= MachineType_FIRST)) { + out << MachineType_to_string(obj.type); + } else { + out << "NULL"; + } + out << "-"; + out << obj.num; + out << std::flush; + return out; +} + +#endif // __MEM_RUBY_SYSTEM_MACHINEID_HH__ diff --git a/src/mem/ruby/common/NetDest.hh b/src/mem/ruby/common/NetDest.hh index f982b7c38..ba72fe214 100644 --- a/src/mem/ruby/common/NetDest.hh +++ b/src/mem/ruby/common/NetDest.hh @@ -38,7 +38,7 @@ #include #include "mem/ruby/common/Set.hh" -#include "mem/ruby/system/MachineID.hh" +#include "mem/ruby/common/MachineID.hh" class NetDest { diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc new file mode 100644 index 000000000..1961765c5 --- /dev/null +++ b/src/mem/ruby/network/MessageBuffer.cc @@ -0,0 +1,422 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#include "base/cprintf.hh" +#include "base/misc.hh" +#include "base/stl_helpers.hh" +#include "debug/RubyQueue.hh" +#include "mem/ruby/network/MessageBuffer.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; +using m5::stl_helpers::operator<<; + +MessageBuffer::MessageBuffer(const string &name) + : m_time_last_time_size_checked(0), m_time_last_time_enqueue(0), + m_time_last_time_pop(0), m_last_arrival_time(0) +{ + m_msg_counter = 0; + m_consumer = NULL; + m_sender = NULL; + m_receiver = NULL; + + m_ordering_set = false; + m_strict_fifo = true; + m_max_size = 0; + m_randomization = true; + m_size_last_time_size_checked = 0; + m_size_at_cycle_start = 0; + m_msgs_this_cycle = 0; + m_not_avail_count = 0; + m_priority_rank = 0; + m_name = name; + + m_stall_msg_map.clear(); + m_input_link_id = 0; + m_vnet_id = 0; +} + +unsigned int +MessageBuffer::getSize() +{ + if (m_time_last_time_size_checked != m_receiver->curCycle()) { + m_time_last_time_size_checked = m_receiver->curCycle(); + m_size_last_time_size_checked = m_prio_heap.size(); + } + + return m_size_last_time_size_checked; +} + +bool +MessageBuffer::areNSlotsAvailable(unsigned int n) +{ + + // fast path when message buffers have infinite size + if (m_max_size == 0) { + return true; + } + + // determine the correct size for the current cycle + // pop operations shouldn't effect the network's visible size + // until next cycle, but enqueue operations effect the visible + // size immediately + unsigned int current_size = 0; + + if (m_time_last_time_pop < m_sender->clockEdge()) { + // no pops this cycle - heap size is correct + current_size = m_prio_heap.size(); + } else { + if (m_time_last_time_enqueue < m_sender->curCycle()) { + // no enqueues this cycle - m_size_at_cycle_start is correct + current_size = m_size_at_cycle_start; + } else { + // both pops and enqueues occured this cycle - add new + // enqueued msgs to m_size_at_cycle_start + current_size = m_size_at_cycle_start + m_msgs_this_cycle; + } + } + + // now compare the new size with our max size + if (current_size + n <= m_max_size) { + return true; + } else { + DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, " + "m_max_size: %d\n", + n, current_size, m_prio_heap.size(), m_max_size); + m_not_avail_count++; + return false; + } +} + +const Message* +MessageBuffer::peek() const +{ + DPRINTF(RubyQueue, "Peeking at head of queue.\n"); + assert(isReady()); + + const Message* msg_ptr = m_prio_heap.front().m_msgptr.get(); + assert(msg_ptr); + + DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr)); + return msg_ptr; +} + +// FIXME - move me somewhere else +Cycles +random_time() +{ + Cycles time(1); + time += Cycles(random() & 0x3); // [0...3] + if ((random() & 0x7) == 0) { // 1 in 8 chance + time += Cycles(100 + (random() % 0xf)); // 100 + [1...15] + } + return time; +} + +void +MessageBuffer::enqueue(MsgPtr message, Cycles delta) +{ + m_msg_counter++; + + // record current time incase we have a pop that also adjusts my size + if (m_time_last_time_enqueue < m_sender->curCycle()) { + m_msgs_this_cycle = 0; // first msg this cycle + m_time_last_time_enqueue = m_sender->curCycle(); + } + m_msgs_this_cycle++; + + assert(m_ordering_set); + + // Calculate the arrival time of the message, that is, the first + // cycle the message can be dequeued. 
+ assert(delta > 0); + Tick current_time = m_sender->clockEdge(); + Tick arrival_time = 0; + + if (!RubySystem::getRandomization() || !m_randomization) { + // No randomization + arrival_time = current_time + delta * m_sender->clockPeriod(); + } else { + // Randomization - ignore delta + if (m_strict_fifo) { + if (m_last_arrival_time < current_time) { + m_last_arrival_time = current_time; + } + arrival_time = m_last_arrival_time + + random_time() * m_sender->clockPeriod(); + } else { + arrival_time = current_time + + random_time() * m_sender->clockPeriod(); + } + } + + // Check the arrival time + assert(arrival_time > current_time); + if (m_strict_fifo) { + if (arrival_time < m_last_arrival_time) { + panic("FIFO ordering violated: %s name: %s current time: %d " + "delta: %d arrival_time: %d last arrival_time: %d\n", + *this, m_name, current_time, + delta * m_sender->clockPeriod(), + arrival_time, m_last_arrival_time); + } + } + + // If running a cache trace, don't worry about the last arrival checks + if (!g_system_ptr->m_warmup_enabled) { + m_last_arrival_time = arrival_time; + } + + // compute the delay cycles and set enqueue time + Message* msg_ptr = message.get(); + assert(msg_ptr != NULL); + + assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() && + "ensure we aren't dequeued early"); + + msg_ptr->updateDelayedTicks(m_sender->clockEdge()); + msg_ptr->setLastEnqueueTime(arrival_time); + + // Insert the message into the priority heap + MessageBufferNode thisNode(arrival_time, m_msg_counter, message); + m_prio_heap.push_back(thisNode); + push_heap(m_prio_heap.begin(), m_prio_heap.end(), + greater()); + + DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n", + arrival_time, *(message.get())); + + // Schedule the wakeup + assert(m_consumer != NULL); + m_consumer->scheduleEventAbsolute(arrival_time); + m_consumer->storeEventInfo(m_vnet_id); +} + +Cycles +MessageBuffer::dequeue() +{ + DPRINTF(RubyQueue, "Popping\n"); + assert(isReady()); + + // get MsgPtr of the message about to be dequeued + MsgPtr message = m_prio_heap.front().m_msgptr; + + // get the delay cycles + message->updateDelayedTicks(m_receiver->clockEdge()); + Cycles delayCycles = + m_receiver->ticksToCycles(message->getDelayedTicks()); + + // record previous size and time so the current buffer size isn't + // adjusted until next cycle + if (m_time_last_time_pop < m_receiver->clockEdge()) { + m_size_at_cycle_start = m_prio_heap.size(); + m_time_last_time_pop = m_receiver->clockEdge(); + } + + pop_heap(m_prio_heap.begin(), m_prio_heap.end(), + greater()); + m_prio_heap.pop_back(); + + return delayCycles; +} + +void +MessageBuffer::clear() +{ + m_prio_heap.clear(); + + m_msg_counter = 0; + m_time_last_time_enqueue = Cycles(0); + m_time_last_time_pop = 0; + m_size_at_cycle_start = 0; + m_msgs_this_cycle = 0; +} + +void +MessageBuffer::recycle() +{ + DPRINTF(RubyQueue, "Recycling.\n"); + assert(isReady()); + MessageBufferNode node = m_prio_heap.front(); + pop_heap(m_prio_heap.begin(), m_prio_heap.end(), + greater()); + + node.m_time = m_receiver->clockEdge(m_recycle_latency); + m_prio_heap.back() = node; + push_heap(m_prio_heap.begin(), m_prio_heap.end(), + greater()); + m_consumer-> + scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency)); +} + +void +MessageBuffer::reanalyzeList(list <, Tick nextTick) +{ + while(!lt.empty()) { + m_msg_counter++; + MessageBufferNode msgNode(nextTick, m_msg_counter, lt.front()); + + m_prio_heap.push_back(msgNode); + push_heap(m_prio_heap.begin(), m_prio_heap.end(), + 
greater()); + + m_consumer->scheduleEventAbsolute(nextTick); + lt.pop_front(); + } +} + +void +MessageBuffer::reanalyzeMessages(const Address& addr) +{ + DPRINTF(RubyQueue, "ReanalyzeMessages\n"); + assert(m_stall_msg_map.count(addr) > 0); + Tick nextTick = m_receiver->clockEdge(Cycles(1)); + + // + // Put all stalled messages associated with this address back on the + // prio heap + // + reanalyzeList(m_stall_msg_map[addr], nextTick); + m_stall_msg_map.erase(addr); +} + +void +MessageBuffer::reanalyzeAllMessages() +{ + DPRINTF(RubyQueue, "ReanalyzeAllMessages\n"); + Tick nextTick = m_receiver->clockEdge(Cycles(1)); + + // + // Put all stalled messages associated with this address back on the + // prio heap + // + for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin(); + map_iter != m_stall_msg_map.end(); ++map_iter) { + reanalyzeList(map_iter->second, nextTick); + } + m_stall_msg_map.clear(); +} + +void +MessageBuffer::stallMessage(const Address& addr) +{ + DPRINTF(RubyQueue, "Stalling due to %s\n", addr); + assert(isReady()); + assert(addr.getOffset() == 0); + MsgPtr message = m_prio_heap.front().m_msgptr; + + dequeue(); + + // + // Note: no event is scheduled to analyze the map at a later time. + // Instead the controller is responsible to call reanalyzeMessages when + // these addresses change state. + // + (m_stall_msg_map[addr]).push_back(message); +} + +void +MessageBuffer::print(ostream& out) const +{ + ccprintf(out, "[MessageBuffer: "); + if (m_consumer != NULL) { + ccprintf(out, " consumer-yes "); + } + + vector copy(m_prio_heap); + sort_heap(copy.begin(), copy.end(), greater()); + ccprintf(out, "%s] %s", copy, m_name); +} + +bool +MessageBuffer::isReady() const +{ + return ((m_prio_heap.size() > 0) && + (m_prio_heap.front().m_time <= m_receiver->clockEdge())); +} + +bool +MessageBuffer::functionalRead(Packet *pkt) +{ + // Check the priority heap and read any messages that may + // correspond to the address in the packet. + for (unsigned int i = 0; i < m_prio_heap.size(); ++i) { + Message *msg = m_prio_heap[i].m_msgptr.get(); + if (msg->functionalRead(pkt)) return true; + } + + // Read the messages in the stall queue that correspond + // to the address in the packet. + for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin(); + map_iter != m_stall_msg_map.end(); + ++map_iter) { + + for (std::list::iterator it = (map_iter->second).begin(); + it != (map_iter->second).end(); ++it) { + + Message *msg = (*it).get(); + if (msg->functionalRead(pkt)) return true; + } + } + return false; +} + +uint32_t +MessageBuffer::functionalWrite(Packet *pkt) +{ + uint32_t num_functional_writes = 0; + + // Check the priority heap and write any messages that may + // correspond to the address in the packet. + for (unsigned int i = 0; i < m_prio_heap.size(); ++i) { + Message *msg = m_prio_heap[i].m_msgptr.get(); + if (msg->functionalWrite(pkt)) { + num_functional_writes++; + } + } + + // Check the stall queue and write any messages that may + // correspond to the address in the packet. 
+ for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin(); + map_iter != m_stall_msg_map.end(); + ++map_iter) { + + for (std::list::iterator it = (map_iter->second).begin(); + it != (map_iter->second).end(); ++it) { + + Message *msg = (*it).get(); + if (msg->functionalWrite(pkt)) { + num_functional_writes++; + } + } + } + + return num_functional_writes; +} diff --git a/src/mem/ruby/network/MessageBuffer.hh b/src/mem/ruby/network/MessageBuffer.hh new file mode 100644 index 000000000..6d51eade9 --- /dev/null +++ b/src/mem/ruby/network/MessageBuffer.hh @@ -0,0 +1,215 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Unordered buffer of messages that can be inserted such + * that they can be dequeued after a given delta time has expired. 
+ */ + +#ifndef __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__ +#define __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__ + +#include +#include +#include +#include +#include +#include + +#include "mem/ruby/common/Address.hh" +#include "mem/ruby/common/Consumer.hh" +#include "mem/ruby/network/MessageBufferNode.hh" +#include "mem/ruby/slicc_interface/Message.hh" +#include "mem/packet.hh" + +class MessageBuffer +{ + public: + MessageBuffer(const std::string &name = ""); + + std::string name() const { return m_name; } + + void setRecycleLatency(Cycles recycle_latency) + { m_recycle_latency = recycle_latency; } + + void reanalyzeMessages(const Address& addr); + void reanalyzeAllMessages(); + void stallMessage(const Address& addr); + + // TRUE if head of queue timestamp <= SystemTime + bool isReady() const; + + void + delayHead() + { + MessageBufferNode node = m_prio_heap.front(); + std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(), + std::greater()); + m_prio_heap.pop_back(); + enqueue(node.m_msgptr, Cycles(1)); + } + + bool areNSlotsAvailable(unsigned int n); + int getPriority() { return m_priority_rank; } + void setPriority(int rank) { m_priority_rank = rank; } + void setConsumer(Consumer* consumer) + { + if (m_consumer != NULL) { + fatal("Trying to connect %s to MessageBuffer %s. \ + \n%s already connected. Check the cntrl_id's.\n", + *consumer, *this, *m_consumer); + } + m_consumer = consumer; + } + + void setSender(ClockedObject* obj) + { + assert(m_sender == NULL || m_sender == obj); + m_sender = obj; + } + + void setReceiver(ClockedObject* obj) + { + assert(m_receiver == NULL || m_receiver == obj); + m_receiver = obj; + } + + void setDescription(const std::string& name) { m_name = name; } + std::string getDescription() { return m_name;} + + Consumer* getConsumer() { return m_consumer; } + + //! Function for extracting the message at the head of the + //! message queue. The function assumes that the queue is nonempty. + const Message* peek() const; + + const MsgPtr& + peekMsgPtr() const + { + assert(isReady()); + return m_prio_heap.front().m_msgptr; + } + + void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); } + void enqueue(MsgPtr message, Cycles delta); + + //! Updates the delay cycles of the message at the head of the queue, + //! removes it from the queue and returns its total delay. + Cycles dequeue(); + + void recycle(); + bool isEmpty() const { return m_prio_heap.size() == 0; } + + void + setOrdering(bool order) + { + m_strict_fifo = order; + m_ordering_set = true; + } + + void resize(unsigned int size) { m_max_size = size; } + unsigned int getSize(); + void setRandomization(bool random_flag) { m_randomization = random_flag; } + + void clear(); + void print(std::ostream& out) const; + void clearStats() { m_not_avail_count = 0; m_msg_counter = 0; } + + void setIncomingLink(int link_id) { m_input_link_id = link_id; } + void setVnet(int net) { m_vnet_id = net; } + + // Function for figuring out if any of the messages in the buffer can + // satisfy the read request for the address in the packet. + // Return value, if true, indicates that the request was fulfilled. + bool functionalRead(Packet *pkt); + + // Function for figuring out if any of the messages in the buffer need + // to be updated with the data from the packet. + // Return value indicates the number of messages that were updated. + // This required for debugging the code. 
+ uint32_t functionalWrite(Packet *pkt); + + private: + void reanalyzeList(std::list &, Tick); + + private: + //added by SS + Cycles m_recycle_latency; + + // Data Members (m_ prefix) + //! The two ends of the buffer. + ClockedObject* m_sender; + ClockedObject* m_receiver; + + //! Consumer to signal a wakeup(), can be NULL + Consumer* m_consumer; + std::vector m_prio_heap; + + // use a std::map for the stalled messages as this container is + // sorted and ensures a well-defined iteration order + typedef std::map< Address, std::list > StallMsgMapType; + + StallMsgMapType m_stall_msg_map; + std::string m_name; + + unsigned int m_max_size; + Cycles m_time_last_time_size_checked; + unsigned int m_size_last_time_size_checked; + + // variables used so enqueues appear to happen imediately, while + // pop happen the next cycle + Cycles m_time_last_time_enqueue; + Tick m_time_last_time_pop; + Tick m_last_arrival_time; + + unsigned int m_size_at_cycle_start; + unsigned int m_msgs_this_cycle; + + int m_not_avail_count; // count the # of times I didn't have N + // slots available + uint64 m_msg_counter; + int m_priority_rank; + bool m_strict_fifo; + bool m_ordering_set; + bool m_randomization; + + int m_input_link_id; + int m_vnet_id; +}; + +Cycles random_time(); + +inline std::ostream& +operator<<(std::ostream& out, const MessageBuffer& obj) +{ + obj.print(out); + out << std::flush; + return out; +} + +#endif // __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__ diff --git a/src/mem/ruby/network/MessageBufferNode.cc b/src/mem/ruby/network/MessageBufferNode.cc new file mode 100644 index 000000000..2e682b096 --- /dev/null +++ b/src/mem/ruby/network/MessageBufferNode.cc @@ -0,0 +1,39 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "mem/ruby/network/MessageBufferNode.hh" + +void +MessageBufferNode::print(std::ostream& out) const +{ + out << "["; + out << m_time << ", "; + out << m_msg_counter << ", "; + out << *m_msgptr << "; "; + out << "]"; +} diff --git a/src/mem/ruby/network/MessageBufferNode.hh b/src/mem/ruby/network/MessageBufferNode.hh new file mode 100644 index 000000000..16aec8a1b --- /dev/null +++ b/src/mem/ruby/network/MessageBufferNode.hh @@ -0,0 +1,75 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__ +#define __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__ + +#include + +#include "mem/ruby/slicc_interface/Message.hh" + +class MessageBufferNode +{ + public: + MessageBufferNode() + : m_time(0), m_msg_counter(0) + {} + + MessageBufferNode(const Tick time, uint64_t counter, + const MsgPtr& msgptr) + : m_time(time), m_msg_counter(counter), m_msgptr(msgptr) + {} + + void print(std::ostream& out) const; + + public: + Tick m_time; + uint64_t m_msg_counter; // FIXME, should this be a 64-bit value? 
+ MsgPtr m_msgptr; +}; + +inline bool +operator>(const MessageBufferNode& n1, const MessageBufferNode& n2) +{ + if (n1.m_time == n2.m_time) { + assert(n1.m_msg_counter != n2.m_msg_counter); + return n1.m_msg_counter > n2.m_msg_counter; + } else { + return n1.m_time > n2.m_time; + } +} + +inline std::ostream& +operator<<(std::ostream& out, const MessageBufferNode& obj) +{ + obj.print(out); + out << std::flush; + return out; +} + +#endif // __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__ diff --git a/src/mem/ruby/network/SConscript b/src/mem/ruby/network/SConscript index c4abba716..1b0b1c94b 100644 --- a/src/mem/ruby/network/SConscript +++ b/src/mem/ruby/network/SConscript @@ -39,5 +39,7 @@ SimObject('Network.py') Source('BasicLink.cc') Source('BasicRouter.cc') +Source('MessageBuffer.cc') +Source('MessageBufferNode.cc') Source('Network.cc') Source('Topology.cc') diff --git a/src/mem/ruby/network/garnet/BaseGarnetNetwork.cc b/src/mem/ruby/network/garnet/BaseGarnetNetwork.cc index f237c4dcc..01f1b80ed 100644 --- a/src/mem/ruby/network/garnet/BaseGarnetNetwork.cc +++ b/src/mem/ruby/network/garnet/BaseGarnetNetwork.cc @@ -28,8 +28,8 @@ * Authors: Niket Agarwal */ -#include "mem/ruby/buffers/MessageBuffer.hh" #include "mem/ruby/network/garnet/BaseGarnetNetwork.hh" +#include "mem/ruby/network/MessageBuffer.hh" using namespace std; diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc index 3e4088038..5f9493806 100644 --- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc +++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc @@ -34,7 +34,7 @@ #include "base/cast.hh" #include "base/stl_helpers.hh" #include "debug/RubyNetwork.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.hh" #include "mem/ruby/network/garnet/fixed-pipeline/flitBuffer_d.hh" #include "mem/ruby/slicc_interface/NetworkMessage.hh" diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc index ba32abd44..13bbe2b08 100644 --- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc +++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc @@ -34,7 +34,7 @@ #include "base/cast.hh" #include "base/stl_helpers.hh" #include "debug/RubyNetwork.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.hh" #include "mem/ruby/network/garnet/flexible-pipeline/flitBuffer.hh" #include "mem/ruby/slicc_interface/NetworkMessage.hh" diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc index cf2430e36..0c6111c48 100644 --- a/src/mem/ruby/network/simple/PerfectSwitch.cc +++ b/src/mem/ruby/network/simple/PerfectSwitch.cc @@ -30,7 +30,7 @@ #include "base/cast.hh" #include "debug/RubyNetwork.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/network/simple/PerfectSwitch.hh" #include "mem/ruby/network/simple/SimpleNetwork.hh" #include "mem/ruby/network/simple/Switch.hh" diff --git a/src/mem/ruby/network/simple/SimpleNetwork.cc b/src/mem/ruby/network/simple/SimpleNetwork.cc index 05b729183..9eca157f6 100644 --- a/src/mem/ruby/network/simple/SimpleNetwork.cc +++ 
b/src/mem/ruby/network/simple/SimpleNetwork.cc @@ -31,8 +31,8 @@ #include "base/cast.hh" #include "base/stl_helpers.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" #include "mem/ruby/common/NetDest.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/network/simple/SimpleLink.hh" #include "mem/ruby/network/simple/SimpleNetwork.hh" #include "mem/ruby/network/simple/Switch.hh" diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc index 1e153be76..6e116d82c 100644 --- a/src/mem/ruby/network/simple/Switch.cc +++ b/src/mem/ruby/network/simple/Switch.cc @@ -30,7 +30,7 @@ #include "base/cast.hh" #include "base/stl_helpers.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/network/simple/PerfectSwitch.hh" #include "mem/ruby/network/simple/SimpleNetwork.hh" #include "mem/ruby/network/simple/Switch.hh" diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc index da7b1732b..40958a6da 100644 --- a/src/mem/ruby/network/simple/Throttle.cc +++ b/src/mem/ruby/network/simple/Throttle.cc @@ -31,8 +31,8 @@ #include "base/cast.hh" #include "base/cprintf.hh" #include "debug/RubyNetwork.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" #include "mem/ruby/network/simple/Throttle.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/network/Network.hh" #include "mem/ruby/slicc_interface/NetworkMessage.hh" #include "mem/ruby/system/System.hh" diff --git a/src/mem/ruby/profiler/Profiler.hh b/src/mem/ruby/profiler/Profiler.hh index 2a0ff71b2..07d1411c0 100644 --- a/src/mem/ruby/profiler/Profiler.hh +++ b/src/mem/ruby/profiler/Profiler.hh @@ -57,7 +57,7 @@ #include "mem/protocol/RubyAccessMode.hh" #include "mem/protocol/RubyRequestType.hh" #include "mem/ruby/common/Global.hh" -#include "mem/ruby/system/MachineID.hh" +#include "mem/ruby/common/MachineID.hh" #include "params/RubySystem.hh" class RubyRequest; diff --git a/src/mem/ruby/recorder/CacheRecorder.cc b/src/mem/ruby/recorder/CacheRecorder.cc deleted file mode 100644 index a63dbd48e..000000000 --- a/src/mem/ruby/recorder/CacheRecorder.cc +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood - * Copyright (c) 2010 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "debug/RubyCacheTrace.hh" -#include "mem/ruby/recorder/CacheRecorder.hh" -#include "mem/ruby/system/Sequencer.hh" -#include "mem/ruby/system/System.hh" - -using namespace std; - -void -TraceRecord::print(ostream& out) const -{ - out << "[TraceRecord: Node, " << m_cntrl_id << ", " - << m_data_address << ", " << m_pc_address << ", " - << m_type << ", Time: " << m_time << "]"; -} - -CacheRecorder::CacheRecorder() - : m_uncompressed_trace(NULL), - m_uncompressed_trace_size(0), - m_block_size_bytes(RubySystem::getBlockSizeBytes()) -{ -} - -CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace, - uint64_t uncompressed_trace_size, - std::vector& seq_map, - uint64_t block_size_bytes) - : m_uncompressed_trace(uncompressed_trace), - m_uncompressed_trace_size(uncompressed_trace_size), - m_seq_map(seq_map), m_bytes_read(0), m_records_read(0), - m_records_flushed(0), m_block_size_bytes(block_size_bytes) -{ - if (m_uncompressed_trace != NULL) { - if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) { - // Block sizes larger than when the trace was recorded are not - // supported, as we cannot reliably turn accesses to smaller blocks - // into larger ones. - panic("Recorded cache block size (%d) < current block size (%d) !!", - m_block_size_bytes, RubySystem::getBlockSizeBytes()); - } - } -} - -CacheRecorder::~CacheRecorder() -{ - if (m_uncompressed_trace != NULL) { - delete [] m_uncompressed_trace; - m_uncompressed_trace = NULL; - } - m_seq_map.clear(); -} - -void -CacheRecorder::enqueueNextFlushRequest() -{ - if (m_records_flushed < m_records.size()) { - TraceRecord* rec = m_records[m_records_flushed]; - m_records_flushed++; - Request* req = new Request(rec->m_data_address, - m_block_size_bytes, 0, - Request::funcMasterId); - MemCmd::Command requestType = MemCmd::FlushReq; - Packet *pkt = new Packet(req, requestType); - - Sequencer* m_sequencer_ptr = m_seq_map[rec->m_cntrl_id]; - assert(m_sequencer_ptr != NULL); - m_sequencer_ptr->makeRequest(pkt); - - DPRINTF(RubyCacheTrace, "Flushing %s\n", *rec); - } -} - -void -CacheRecorder::enqueueNextFetchRequest() -{ - if (m_bytes_read < m_uncompressed_trace_size) { - TraceRecord* traceRecord = (TraceRecord*) (m_uncompressed_trace + - m_bytes_read); - - DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord); - - for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes; - rec_bytes_read += RubySystem::getBlockSizeBytes()) { - Request* req = new Request(); - MemCmd::Command requestType; - - if (traceRecord->m_type == RubyRequestType_LD) { - requestType = MemCmd::ReadReq; - req->setPhys(traceRecord->m_data_address + rec_bytes_read, - RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); - } else if (traceRecord->m_type == RubyRequestType_IFETCH) { - requestType = MemCmd::ReadReq; - req->setPhys(traceRecord->m_data_address + rec_bytes_read, - RubySystem::getBlockSizeBytes(), - Request::INST_FETCH, Request::funcMasterId); - } else { - requestType = MemCmd::WriteReq; - 
req->setPhys(traceRecord->m_data_address + rec_bytes_read, - RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); - } - - Packet *pkt = new Packet(req, requestType); - pkt->dataStatic(traceRecord->m_data + rec_bytes_read); - - Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id]; - assert(m_sequencer_ptr != NULL); - m_sequencer_ptr->makeRequest(pkt); - } - - m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes); - m_records_read++; - } -} - -void -CacheRecorder::addRecord(int cntrl, const physical_address_t data_addr, - const physical_address_t pc_addr, - RubyRequestType type, Time time, DataBlock& data) -{ - TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) + - m_block_size_bytes); - rec->m_cntrl_id = cntrl; - rec->m_time = time; - rec->m_data_address = data_addr; - rec->m_pc_address = pc_addr; - rec->m_type = type; - memcpy(rec->m_data, data.getData(0, m_block_size_bytes), - m_block_size_bytes); - - m_records.push_back(rec); -} - -uint64 -CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size) -{ - std::sort(m_records.begin(), m_records.end(), compareTraceRecords); - - int size = m_records.size(); - uint64 current_size = 0; - int record_size = sizeof(TraceRecord) + m_block_size_bytes; - - for (int i = 0; i < size; ++i) { - // Determine if we need to expand the buffer size - if (current_size + record_size > total_size) { - uint8_t* new_buf = new (nothrow) uint8_t[total_size * 2]; - if (new_buf == NULL) { - fatal("Unable to allocate buffer of size %s\n", - total_size * 2); - } - total_size = total_size * 2; - uint8_t* old_buf = *buf; - memcpy(new_buf, old_buf, current_size); - *buf = new_buf; - delete [] old_buf; - } - - // Copy the current record into the buffer - memcpy(&((*buf)[current_size]), m_records[i], record_size); - current_size += record_size; - - free(m_records[i]); - m_records[i] = NULL; - } - - m_records.clear(); - return current_size; -} diff --git a/src/mem/ruby/recorder/CacheRecorder.hh b/src/mem/ruby/recorder/CacheRecorder.hh deleted file mode 100644 index 2156b0689..000000000 --- a/src/mem/ruby/recorder/CacheRecorder.hh +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood - * Copyright (c) 2010 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Recording cache requests made to a ruby cache at certain ruby - * time. Also dump the requests to a gziped file. - */ - -#ifndef __MEM_RUBY_RECORDER_CACHERECORDER_HH__ -#define __MEM_RUBY_RECORDER_CACHERECORDER_HH__ - -#include - -#include "base/hashmap.hh" -#include "mem/protocol/RubyRequestType.hh" -#include "mem/ruby/common/Address.hh" -#include "mem/ruby/common/DataBlock.hh" -#include "mem/ruby/common/TypeDefines.hh" - -class Sequencer; - -/*! - * Class for recording cache contents. Note that the last element of the - * class is an array of length zero. It is used for creating variable - * length object, so that while writing the data to a file one does not - * need to copy the meta data and the actual data separately. - */ -class TraceRecord { - public: - int m_cntrl_id; - Time m_time; - physical_address_t m_data_address; - physical_address_t m_pc_address; - RubyRequestType m_type; - uint8_t m_data[0]; - - void print(std::ostream& out) const; -}; - -class CacheRecorder -{ - public: - CacheRecorder(); - ~CacheRecorder(); - - CacheRecorder(uint8_t* uncompressed_trace, - uint64_t uncompressed_trace_size, - std::vector& SequencerMap, - uint64_t block_size_bytes); - void addRecord(int cntrl, const physical_address_t data_addr, - const physical_address_t pc_addr, RubyRequestType type, - Time time, DataBlock& data); - - uint64 aggregateRecords(uint8_t** data, uint64 size); - - /*! - * Function for flushing the memory contents of the caches to the - * main memory. It goes through the recorded contents of the caches, - * and issues flush requests. Except for the first one, a flush request - * is issued only after the previous one has completed. This currently - * requires use of MOESI Hammer protocol since only that protocol - * supports flush requests. - */ - void enqueueNextFlushRequest(); - - /*! - * Function for fetching warming up the memory and the caches. It goes - * through the recorded contents of the caches, as available in the - * checkpoint and issues fetch requests. Except for the first one, a - * fetch request is issued only after the previous one has completed. - * It should be possible to use this with any protocol. 
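The zero-length m_data array described above makes each serialized record exactly sizeof(TraceRecord) + block_size bytes long; addRecord() fills one such record with a single allocation, and aggregateRecords() and enqueueNextFetchRequest() walk the flat trace using that same stride. A worked illustration of the layout, assuming a 64-byte block:

    // Illustration only; 64 is an assumed block size, and addr / line_data
    // are hypothetical inputs.
    size_t record_size = sizeof(TraceRecord) + 64;
    TraceRecord *rec = (TraceRecord *) malloc(record_size);
    rec->m_data_address = addr;              // header fields ...
    memcpy(rec->m_data, line_data, 64);      // ... payload immediately after
    // One memcpy of record_size later serializes header and data together.
    free(rec);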
- */ - void enqueueNextFetchRequest(); - - private: - // Private copy constructor and assignment operator - CacheRecorder(const CacheRecorder& obj); - CacheRecorder& operator=(const CacheRecorder& obj); - - std::vector m_records; - uint8_t* m_uncompressed_trace; - uint64_t m_uncompressed_trace_size; - std::vector m_seq_map; - uint64_t m_bytes_read; - uint64_t m_records_read; - uint64_t m_records_flushed; - uint64_t m_block_size_bytes; -}; - -inline bool -compareTraceRecords(const TraceRecord* n1, const TraceRecord* n2) -{ - return n1->m_time > n2->m_time; -} - -inline std::ostream& -operator<<(std::ostream& out, const TraceRecord& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -#endif // __MEM_RUBY_RECORDER_CACHERECORDER_HH__ diff --git a/src/mem/ruby/recorder/SConscript b/src/mem/ruby/recorder/SConscript deleted file mode 100644 index e1b3d78b7..000000000 --- a/src/mem/ruby/recorder/SConscript +++ /dev/null @@ -1,36 +0,0 @@ -# -*- mode:python -*- - -# Copyright (c) 2009 The Hewlett-Packard Development Company -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# -# Authors: Nathan Binkert - -Import('*') - -if env['PROTOCOL'] == 'None': - Return() - -Source('CacheRecorder.cc') diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh index 4ef3f328b..7dcb3b8ba 100644 --- a/src/mem/ruby/slicc_interface/AbstractController.hh +++ b/src/mem/ruby/slicc_interface/AbstractController.hh @@ -34,14 +34,14 @@ #include "base/callback.hh" #include "mem/protocol/AccessPermission.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" #include "mem/ruby/common/Address.hh" #include "mem/ruby/common/Consumer.hh" #include "mem/ruby/common/DataBlock.hh" #include "mem/ruby/common/Histogram.hh" +#include "mem/ruby/common/MachineID.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/network/Network.hh" -#include "mem/ruby/recorder/CacheRecorder.hh" -#include "mem/ruby/system/MachineID.hh" +#include "mem/ruby/system/CacheRecorder.hh" #include "mem/packet.hh" #include "params/RubyController.hh" #include "sim/clocked_object.hh" diff --git a/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh b/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh index 894777b97..14d24f028 100644 --- a/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh +++ b/src/mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh @@ -31,9 +31,9 @@ #include "mem/protocol/MachineType.hh" #include "mem/ruby/common/Address.hh" +#include "mem/ruby/common/MachineID.hh" #include "mem/ruby/common/NetDest.hh" -#include "mem/ruby/system/DirectoryMemory.hh" -#include "mem/ruby/system/MachineID.hh" +#include "mem/ruby/structures/DirectoryMemory.hh" // used to determine the home directory // returns a value between 0 and total_directories_within_the_system diff --git a/src/mem/ruby/structures/AbstractReplacementPolicy.hh b/src/mem/ruby/structures/AbstractReplacementPolicy.hh new file mode 100644 index 000000000..3c492377e --- /dev/null +++ b/src/mem/ruby/structures/AbstractReplacementPolicy.hh @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2007 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__ +#define __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__ + +#include "base/types.hh" + +class AbstractReplacementPolicy +{ + public: + AbstractReplacementPolicy(Index num_sets, Index assoc); + virtual ~AbstractReplacementPolicy(); + + /* touch a block. a.k.a. update timestamp */ + virtual void touch(Index set, Index way, Tick time) = 0; + + /* returns the way to replace */ + virtual Index getVictim(Index set) const = 0; + + /* get the time of the last access */ + Tick getLastAccess(Index set, Index way); + + protected: + unsigned m_num_sets; /** total number of sets */ + unsigned m_assoc; /** set associativity */ + Tick **m_last_ref_ptr; /** timestamp of last reference */ +}; + +inline +AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets, + Index assoc) +{ + m_num_sets = num_sets; + m_assoc = assoc; + m_last_ref_ptr = new Tick*[m_num_sets]; + for(unsigned i = 0; i < m_num_sets; i++){ + m_last_ref_ptr[i] = new Tick[m_assoc]; + for(unsigned j = 0; j < m_assoc; j++){ + m_last_ref_ptr[i][j] = 0; + } + } +} + +inline +AbstractReplacementPolicy::~AbstractReplacementPolicy() +{ + if (m_last_ref_ptr != NULL){ + for (unsigned i = 0; i < m_num_sets; i++){ + if (m_last_ref_ptr[i] != NULL){ + delete[] m_last_ref_ptr[i]; + } + } + delete[] m_last_ref_ptr; + } +} + +inline Tick +AbstractReplacementPolicy::getLastAccess(Index set, Index way) +{ + return m_last_ref_ptr[set][way]; +} + +#endif // __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__ diff --git a/src/mem/ruby/structures/BankedArray.cc b/src/mem/ruby/structures/BankedArray.cc new file mode 100644 index 000000000..0644ffe8b --- /dev/null +++ b/src/mem/ruby/structures/BankedArray.cc @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2012 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
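AbstractReplacementPolicy above owns the per-set, per-way timestamp matrix; concrete policies only implement touch() and getVictim(). The LRUPolicy.hh and PseudoLRUPolicy.hh files this patch moves into structures/ are not reproduced in this hunk, but a true-LRU derivation over this interface would look roughly like the following sketch:

    // Sketch only; the actual LRUPolicy in the tree may differ in detail.
    class LRUPolicySketch : public AbstractReplacementPolicy
    {
      public:
        LRUPolicySketch(Index num_sets, Index assoc)
            : AbstractReplacementPolicy(num_sets, assoc) {}

        void touch(Index set, Index way, Tick time)
        {
            assert(way < m_assoc);
            m_last_ref_ptr[set][way] = time;      // record last reference
        }

        Index getVictim(Index set) const
        {
            // Evict the way with the oldest timestamp.
            Index victim = 0;
            for (unsigned i = 1; i < m_assoc; i++) {
                if (m_last_ref_ptr[set][i] < m_last_ref_ptr[set][victim])
                    victim = i;
            }
            return victim;
        }
    };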
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Brad Beckmann + * + */ + +#include "base/intmath.hh" +#include "mem/ruby/structures/BankedArray.hh" +#include "mem/ruby/system/System.hh" + +BankedArray::BankedArray(unsigned int banks, Cycles accessLatency, + unsigned int startIndexBit) +{ + this->banks = banks; + this->accessLatency = accessLatency; + this->startIndexBit = startIndexBit; + + if (banks != 0) { + bankBits = floorLog2(banks); + } + + busyBanks.resize(banks); +} + +bool +BankedArray::tryAccess(Index idx) +{ + if (accessLatency == 0) + return true; + + unsigned int bank = mapIndexToBank(idx); + assert(bank < banks); + + if (busyBanks[bank].endAccess >= curTick()) { + if (!(busyBanks[bank].startAccess == curTick() && + busyBanks[bank].idx == idx)) { + return false; + } else { + // We tried to allocate resources twice + // in the same cycle for the same addr + return true; + } + } + + busyBanks[bank].idx = idx; + busyBanks[bank].startAccess = curTick(); + busyBanks[bank].endAccess = curTick() + + (accessLatency-1) * g_system_ptr->clockPeriod(); + + return true; +} + +unsigned int +BankedArray::mapIndexToBank(Index idx) +{ + if (banks == 1) { + return 0; + } + return idx % banks; +} diff --git a/src/mem/ruby/structures/BankedArray.hh b/src/mem/ruby/structures/BankedArray.hh new file mode 100644 index 000000000..89007befa --- /dev/null +++ b/src/mem/ruby/structures/BankedArray.hh @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2012 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
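To put numbers on tryAccess() above: a bank stays busy from startAccess = curTick() through endAccess = curTick() + (accessLatency - 1) * clockPeriod(). Assuming, for illustration, accessLatency = 4 and a 500-tick clock, an access granted at tick 10000 refuses conflicting accesses to that bank at ticks 10500, 11000 and 11500, while a repeated request for the same index in the same cycle is reported as already granted. With banks = 8 the mapping is simply idx % 8, so cache sets 3 and 11 contend for the same bank.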
+ * + * Author: Brad Beckmann + * + */ + +#ifndef __MEM_RUBY_SYSTEM_BANKEDARRAY_HH__ +#define __MEM_RUBY_SYSTEM_BANKEDARRAY_HH__ + +#include + +#include "mem/ruby/common/TypeDefines.hh" +#include "sim/core.hh" + +class BankedArray +{ + private: + unsigned int banks; + Cycles accessLatency; + unsigned int bankBits; + unsigned int startIndexBit; + + class AccessRecord + { + public: + AccessRecord() : idx(0), startAccess(0), endAccess(0) {} + Index idx; + Tick startAccess; + Tick endAccess; + }; + + // If the tick event is scheduled then the bank is busy + // otherwise, schedule the event and wait for it to complete + std::vector busyBanks; + + unsigned int mapIndexToBank(Index idx); + + public: + BankedArray(unsigned int banks, Cycles accessLatency, unsigned int startIndexBit); + + // Note: We try the access based on the cache index, not the address + // This is so we don't get aliasing on blocks being replaced + bool tryAccess(Index idx); + +}; + +#endif diff --git a/src/mem/ruby/structures/Cache.py b/src/mem/ruby/structures/Cache.py new file mode 100644 index 000000000..14a359233 --- /dev/null +++ b/src/mem/ruby/structures/Cache.py @@ -0,0 +1,49 @@ +# Copyright (c) 2009 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
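A usage sketch of the interface above, with assumed sizes (the names below are illustrative, not part of the patch); it mirrors how CacheMemory::checkResourceAvailable() further down consults one BankedArray each for the tag and data arrays:

    // Sketch: two same-cycle accesses that collide on a bank.
    // Assumes the simulator is past tick 0 and g_system_ptr is initialised.
    BankedArray tagBanks(2, Cycles(2), 6);   // 2 banks, 2-cycle access latency

    bool a = tagBanks.tryAccess(4);   // set 4 -> bank 0: granted, bank busy
    bool b = tagBanks.tryAccess(6);   // set 6 -> bank 0, same cycle: refused
    bool c = tagBanks.tryAccess(4);   // same set, same cycle: still granted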
+# +# Authors: Steve Reinhardt +# Brad Beckmann + +from m5.params import * +from m5.SimObject import SimObject +from Controller import RubyController + +class RubyCache(SimObject): + type = 'RubyCache' + cxx_class = 'CacheMemory' + cxx_header = "mem/ruby/structures/CacheMemory.hh" + size = Param.MemorySize("capacity in bytes"); + latency = Param.Cycles(""); + assoc = Param.Int(""); + replacement_policy = Param.String("PSEUDO_LRU", ""); + start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line"); + is_icache = Param.Bool(False, "is instruction only cache"); + + dataArrayBanks = Param.Int(1, "Number of banks for the data array") + tagArrayBanks = Param.Int(1, "Number of banks for the tag array") + dataAccessLatency = Param.Cycles(1, "cycles for a data array access") + tagAccessLatency = Param.Cycles(1, "cycles for a tag array access") + resourceStalls = Param.Bool(False, "stall if there is a resource failure") diff --git a/src/mem/ruby/structures/CacheMemory.cc b/src/mem/ruby/structures/CacheMemory.cc new file mode 100644 index 000000000..7ce6cd584 --- /dev/null +++ b/src/mem/ruby/structures/CacheMemory.cc @@ -0,0 +1,565 @@ +/* + * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
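The RubyCache parameters above feed directly into the geometry that CacheMemory::init() computes below. Under assumed values of size = '32kB', assoc = 4 and the default 64-byte block, m_cache_num_sets = (32768 / 4) / 64 = 128, so m_cache_num_set_bits = 7 and, with start_index_bit = 6, addressToCacheSet() selects address bits 6 through 12 as the set index.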
+ */ + +#include "base/intmath.hh" +#include "debug/RubyCache.hh" +#include "debug/RubyCacheTrace.hh" +#include "debug/RubyResourceStalls.hh" +#include "debug/RubyStats.hh" +#include "mem/protocol/AccessPermission.hh" +#include "mem/ruby/structures/CacheMemory.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; + +ostream& +operator<<(ostream& out, const CacheMemory& obj) +{ + obj.print(out); + out << flush; + return out; +} + +CacheMemory * +RubyCacheParams::create() +{ + return new CacheMemory(this); +} + +CacheMemory::CacheMemory(const Params *p) + : SimObject(p), + dataArray(p->dataArrayBanks, p->dataAccessLatency, p->start_index_bit), + tagArray(p->tagArrayBanks, p->tagAccessLatency, p->start_index_bit) +{ + m_cache_size = p->size; + m_latency = p->latency; + m_cache_assoc = p->assoc; + m_policy = p->replacement_policy; + m_start_index_bit = p->start_index_bit; + m_is_instruction_only_cache = p->is_icache; + m_resource_stalls = p->resourceStalls; +} + +void +CacheMemory::init() +{ + m_cache_num_sets = (m_cache_size / m_cache_assoc) / + RubySystem::getBlockSizeBytes(); + assert(m_cache_num_sets > 1); + m_cache_num_set_bits = floorLog2(m_cache_num_sets); + assert(m_cache_num_set_bits > 0); + + if (m_policy == "PSEUDO_LRU") + m_replacementPolicy_ptr = + new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc); + else if (m_policy == "LRU") + m_replacementPolicy_ptr = + new LRUPolicy(m_cache_num_sets, m_cache_assoc); + else + assert(false); + + m_cache.resize(m_cache_num_sets); + for (int i = 0; i < m_cache_num_sets; i++) { + m_cache[i].resize(m_cache_assoc); + for (int j = 0; j < m_cache_assoc; j++) { + m_cache[i][j] = NULL; + } + } +} + +CacheMemory::~CacheMemory() +{ + if (m_replacementPolicy_ptr != NULL) + delete m_replacementPolicy_ptr; + for (int i = 0; i < m_cache_num_sets; i++) { + for (int j = 0; j < m_cache_assoc; j++) { + delete m_cache[i][j]; + } + } +} + +// convert a Address to its location in the cache +Index +CacheMemory::addressToCacheSet(const Address& address) const +{ + assert(address == line_address(address)); + return address.bitSelect(m_start_index_bit, + m_start_index_bit + m_cache_num_set_bits - 1); +} + +// Given a cache index: returns the index of the tag in a set. +// returns -1 if the tag is not found. +int +CacheMemory::findTagInSet(Index cacheSet, const Address& tag) const +{ + assert(tag == line_address(tag)); + // search the set for the tags + m5::hash_map::const_iterator it = m_tag_index.find(tag); + if (it != m_tag_index.end()) + if (m_cache[cacheSet][it->second]->m_Permission != + AccessPermission_NotPresent) + return it->second; + return -1; // Not found +} + +// Given a cache index: returns the index of the tag in a set. +// returns -1 if the tag is not found. +int +CacheMemory::findTagInSetIgnorePermissions(Index cacheSet, + const Address& tag) const +{ + assert(tag == line_address(tag)); + // search the set for the tags + m5::hash_map::const_iterator it = m_tag_index.find(tag); + if (it != m_tag_index.end()) + return it->second; + return -1; // Not found +} + +bool +CacheMemory::tryCacheAccess(const Address& address, RubyRequestType type, + DataBlock*& data_ptr) +{ + assert(address == line_address(address)); + DPRINTF(RubyCache, "address: %s\n", address); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + if (loc != -1) { + // Do we even have a tag match? 
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc]; + m_replacementPolicy_ptr->touch(cacheSet, loc, curTick()); + data_ptr = &(entry->getDataBlk()); + + if (entry->m_Permission == AccessPermission_Read_Write) { + return true; + } + if ((entry->m_Permission == AccessPermission_Read_Only) && + (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) { + return true; + } + // The line must not be accessible + } + data_ptr = NULL; + return false; +} + +bool +CacheMemory::testCacheAccess(const Address& address, RubyRequestType type, + DataBlock*& data_ptr) +{ + assert(address == line_address(address)); + DPRINTF(RubyCache, "address: %s\n", address); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + + if (loc != -1) { + // Do we even have a tag match? + AbstractCacheEntry* entry = m_cache[cacheSet][loc]; + m_replacementPolicy_ptr->touch(cacheSet, loc, curTick()); + data_ptr = &(entry->getDataBlk()); + + return m_cache[cacheSet][loc]->m_Permission != + AccessPermission_NotPresent; + } + + data_ptr = NULL; + return false; +} + +// tests to see if an address is present in the cache +bool +CacheMemory::isTagPresent(const Address& address) const +{ + assert(address == line_address(address)); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + + if (loc == -1) { + // We didn't find the tag + DPRINTF(RubyCache, "No tag match for address: %s\n", address); + return false; + } + DPRINTF(RubyCache, "address: %s found\n", address); + return true; +} + +// Returns true if there is: +// a) a tag match on this address or there is +// b) an unused line in the same cache "way" +bool +CacheMemory::cacheAvail(const Address& address) const +{ + assert(address == line_address(address)); + + Index cacheSet = addressToCacheSet(address); + + for (int i = 0; i < m_cache_assoc; i++) { + AbstractCacheEntry* entry = m_cache[cacheSet][i]; + if (entry != NULL) { + if (entry->m_Address == address || + entry->m_Permission == AccessPermission_NotPresent) { + // Already in the cache or we found an empty entry + return true; + } + } else { + return true; + } + } + return false; +} + +AbstractCacheEntry* +CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry) +{ + assert(address == line_address(address)); + assert(!isTagPresent(address)); + assert(cacheAvail(address)); + DPRINTF(RubyCache, "address: %s\n", address); + + // Find the first open slot + Index cacheSet = addressToCacheSet(address); + std::vector &set = m_cache[cacheSet]; + for (int i = 0; i < m_cache_assoc; i++) { + if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) { + set[i] = entry; // Init entry + set[i]->m_Address = address; + set[i]->m_Permission = AccessPermission_Invalid; + DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n", + address); + set[i]->m_locked = -1; + m_tag_index[address] = i; + + m_replacementPolicy_ptr->touch(cacheSet, i, curTick()); + + return entry; + } + } + panic("Allocate didn't find an available entry"); +} + +void +CacheMemory::deallocate(const Address& address) +{ + assert(address == line_address(address)); + assert(isTagPresent(address)); + DPRINTF(RubyCache, "address: %s\n", address); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + if (loc != -1) { + delete m_cache[cacheSet][loc]; + m_cache[cacheSet][loc] = NULL; + m_tag_index.erase(address); + } +} + +// Returns with the physical address of the conflicting cache line +Address +CacheMemory::cacheProbe(const 
Address& address) const +{ + assert(address == line_address(address)); + assert(!cacheAvail(address)); + + Index cacheSet = addressToCacheSet(address); + return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]-> + m_Address; +} + +// looks an address up in the cache +AbstractCacheEntry* +CacheMemory::lookup(const Address& address) +{ + assert(address == line_address(address)); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + if(loc == -1) return NULL; + return m_cache[cacheSet][loc]; +} + +// looks an address up in the cache +const AbstractCacheEntry* +CacheMemory::lookup(const Address& address) const +{ + assert(address == line_address(address)); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + if(loc == -1) return NULL; + return m_cache[cacheSet][loc]; +} + +// Sets the most recently used bit for a cache block +void +CacheMemory::setMRU(const Address& address) +{ + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + + if(loc != -1) + m_replacementPolicy_ptr->touch(cacheSet, loc, curTick()); +} + +void +CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const +{ + uint64 warmedUpBlocks = 0; + uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets + * (uint64)m_cache_assoc; + + for (int i = 0; i < m_cache_num_sets; i++) { + for (int j = 0; j < m_cache_assoc; j++) { + if (m_cache[i][j] != NULL) { + AccessPermission perm = m_cache[i][j]->m_Permission; + RubyRequestType request_type = RubyRequestType_NULL; + if (perm == AccessPermission_Read_Only) { + if (m_is_instruction_only_cache) { + request_type = RubyRequestType_IFETCH; + } else { + request_type = RubyRequestType_LD; + } + } else if (perm == AccessPermission_Read_Write) { + request_type = RubyRequestType_ST; + } + + if (request_type != RubyRequestType_NULL) { + tr->addRecord(cntrl, m_cache[i][j]->m_Address.getAddress(), + 0, request_type, + m_replacementPolicy_ptr->getLastAccess(i, j), + m_cache[i][j]->getDataBlk()); + warmedUpBlocks++; + } + } + } + } + + DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks" + "recorded %.2f%% \n", name().c_str(), warmedUpBlocks, + (uint64)m_cache_num_sets * (uint64)m_cache_assoc, + (float(warmedUpBlocks)/float(totalBlocks))*100.0); +} + +void +CacheMemory::print(ostream& out) const +{ + out << "Cache dump: " << name() << endl; + for (int i = 0; i < m_cache_num_sets; i++) { + for (int j = 0; j < m_cache_assoc; j++) { + if (m_cache[i][j] != NULL) { + out << " Index: " << i + << " way: " << j + << " entry: " << *m_cache[i][j] << endl; + } else { + out << " Index: " << i + << " way: " << j + << " entry: NULL" << endl; + } + } + } +} + +void +CacheMemory::printData(ostream& out) const +{ + out << "printData() not supported" << endl; +} + +void +CacheMemory::setLocked(const Address& address, int context) +{ + DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context); + assert(address == line_address(address)); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + assert(loc != -1); + m_cache[cacheSet][loc]->m_locked = context; +} + +void +CacheMemory::clearLocked(const Address& address) +{ + DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address); + assert(address == line_address(address)); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + assert(loc != -1); + m_cache[cacheSet][loc]->m_locked = -1; +} + +bool +CacheMemory::isLocked(const 
Address& address, int context) +{ + assert(address == line_address(address)); + Index cacheSet = addressToCacheSet(address); + int loc = findTagInSet(cacheSet, address); + assert(loc != -1); + DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n", + address, m_cache[cacheSet][loc]->m_locked, context); + return m_cache[cacheSet][loc]->m_locked == context; +} + +void +CacheMemory::regStats() +{ + m_demand_hits + .name(name() + ".demand_hits") + .desc("Number of cache demand hits") + ; + + m_demand_misses + .name(name() + ".demand_misses") + .desc("Number of cache demand misses") + ; + + m_demand_accesses + .name(name() + ".demand_accesses") + .desc("Number of cache demand accesses") + ; + + m_demand_accesses = m_demand_hits + m_demand_misses; + + m_sw_prefetches + .name(name() + ".total_sw_prefetches") + .desc("Number of software prefetches") + .flags(Stats::nozero) + ; + + m_hw_prefetches + .name(name() + ".total_hw_prefetches") + .desc("Number of hardware prefetches") + .flags(Stats::nozero) + ; + + m_prefetches + .name(name() + ".total_prefetches") + .desc("Number of prefetches") + .flags(Stats::nozero) + ; + + m_prefetches = m_sw_prefetches + m_hw_prefetches; + + m_accessModeType + .init(RubyRequestType_NUM) + .name(name() + ".access_mode") + .flags(Stats::pdf | Stats::total) + ; + for (int i = 0; i < RubyAccessMode_NUM; i++) { + m_accessModeType + .subname(i, RubyAccessMode_to_string(RubyAccessMode(i))) + .flags(Stats::nozero) + ; + } + + numDataArrayReads + .name(name() + ".num_data_array_reads") + .desc("number of data array reads") + .flags(Stats::nozero) + ; + + numDataArrayWrites + .name(name() + ".num_data_array_writes") + .desc("number of data array writes") + .flags(Stats::nozero) + ; + + numTagArrayReads + .name(name() + ".num_tag_array_reads") + .desc("number of tag array reads") + .flags(Stats::nozero) + ; + + numTagArrayWrites + .name(name() + ".num_tag_array_writes") + .desc("number of tag array writes") + .flags(Stats::nozero) + ; + + numTagArrayStalls + .name(name() + ".num_tag_array_stalls") + .desc("number of stalls caused by tag array") + .flags(Stats::nozero) + ; + + numDataArrayStalls + .name(name() + ".num_data_array_stalls") + .desc("number of stalls caused by data array") + .flags(Stats::nozero) + ; +} + +void +CacheMemory::recordRequestType(CacheRequestType requestType) +{ + DPRINTF(RubyStats, "Recorded statistic: %s\n", + CacheRequestType_to_string(requestType)); + switch(requestType) { + case CacheRequestType_DataArrayRead: + numDataArrayReads++; + return; + case CacheRequestType_DataArrayWrite: + numDataArrayWrites++; + return; + case CacheRequestType_TagArrayRead: + numTagArrayReads++; + return; + case CacheRequestType_TagArrayWrite: + numTagArrayWrites++; + return; + default: + warn("CacheMemory access_type not found: %s", + CacheRequestType_to_string(requestType)); + } +} + +bool +CacheMemory::checkResourceAvailable(CacheResourceType res, Address addr) +{ + if (!m_resource_stalls) { + return true; + } + + if (res == CacheResourceType_TagArray) { + if (tagArray.tryAccess(addressToCacheSet(addr))) return true; + else { + DPRINTF(RubyResourceStalls, + "Tag array stall on addr %s in set %d\n", + addr, addressToCacheSet(addr)); + numTagArrayStalls++; + return false; + } + } else if (res == CacheResourceType_DataArray) { + if (dataArray.tryAccess(addressToCacheSet(addr))) return true; + else { + DPRINTF(RubyResourceStalls, + "Data array stall on addr %s in set %d\n", + addr, addressToCacheSet(addr)); + numDataArrayStalls++; + return false; + } + } else 
{ + assert(false); + return true; + } +} diff --git a/src/mem/ruby/structures/CacheMemory.hh b/src/mem/ruby/structures/CacheMemory.hh new file mode 100644 index 000000000..87a0b40c0 --- /dev/null +++ b/src/mem/ruby/structures/CacheMemory.hh @@ -0,0 +1,173 @@ +/* + * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__ +#define __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__ + +#include +#include + +#include "base/hashmap.hh" +#include "base/statistics.hh" +#include "mem/protocol/CacheRequestType.hh" +#include "mem/protocol/CacheResourceType.hh" +#include "mem/protocol/RubyRequest.hh" +#include "mem/ruby/common/DataBlock.hh" +#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh" +#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh" +#include "mem/ruby/structures/BankedArray.hh" +#include "mem/ruby/structures/LRUPolicy.hh" +#include "mem/ruby/structures/PseudoLRUPolicy.hh" +#include "mem/ruby/system/CacheRecorder.hh" +#include "params/RubyCache.hh" +#include "sim/sim_object.hh" + +class CacheMemory : public SimObject +{ + public: + typedef RubyCacheParams Params; + CacheMemory(const Params *p); + ~CacheMemory(); + + void init(); + + // Public Methods + // perform a cache access and see if we hit or not. Return true on a hit. 
+ bool tryCacheAccess(const Address& address, RubyRequestType type, + DataBlock*& data_ptr); + + // similar to above, but doesn't require full access check + bool testCacheAccess(const Address& address, RubyRequestType type, + DataBlock*& data_ptr); + + // tests to see if an address is present in the cache + bool isTagPresent(const Address& address) const; + + // Returns true if there is: + // a) a tag match on this address or there is + // b) an unused line in the same cache "way" + bool cacheAvail(const Address& address) const; + + // find an unused entry and sets the tag appropriate for the address + AbstractCacheEntry* allocate(const Address& address, AbstractCacheEntry* new_entry); + void allocateVoid(const Address& address, AbstractCacheEntry* new_entry) + { + allocate(address, new_entry); + } + + // Explicitly free up this address + void deallocate(const Address& address); + + // Returns with the physical address of the conflicting cache line + Address cacheProbe(const Address& address) const; + + // looks an address up in the cache + AbstractCacheEntry* lookup(const Address& address); + const AbstractCacheEntry* lookup(const Address& address) const; + + Cycles getLatency() const { return m_latency; } + + // Hook for checkpointing the contents of the cache + void recordCacheContents(int cntrl, CacheRecorder* tr) const; + + // Set this address to most recently used + void setMRU(const Address& address); + + void setLocked (const Address& addr, int context); + void clearLocked (const Address& addr); + bool isLocked (const Address& addr, int context); + + // Print cache contents + void print(std::ostream& out) const; + void printData(std::ostream& out) const; + + void regStats(); + bool checkResourceAvailable(CacheResourceType res, Address addr); + void recordRequestType(CacheRequestType requestType); + + public: + Stats::Scalar m_demand_hits; + Stats::Scalar m_demand_misses; + Stats::Formula m_demand_accesses; + + Stats::Scalar m_sw_prefetches; + Stats::Scalar m_hw_prefetches; + Stats::Formula m_prefetches; + + Stats::Vector m_accessModeType; + + Stats::Scalar numDataArrayReads; + Stats::Scalar numDataArrayWrites; + Stats::Scalar numTagArrayReads; + Stats::Scalar numTagArrayWrites; + + Stats::Scalar numTagArrayStalls; + Stats::Scalar numDataArrayStalls; + + private: + // convert a Address to its location in the cache + Index addressToCacheSet(const Address& address) const; + + // Given a cache tag: returns the index of the tag in a set. + // returns -1 if the tag is not found. + int findTagInSet(Index line, const Address& tag) const; + int findTagInSetIgnorePermissions(Index cacheSet, + const Address& tag) const; + + // Private copy constructor and assignment operator + CacheMemory(const CacheMemory& obj); + CacheMemory& operator=(const CacheMemory& obj); + + private: + Cycles m_latency; + + // Data Members (m_prefix) + bool m_is_instruction_only_cache; + + // The first index is the # of cache lines. + // The second index is the the amount associativity. 
+ m5::hash_map m_tag_index; + std::vector > m_cache; + + AbstractReplacementPolicy *m_replacementPolicy_ptr; + + BankedArray dataArray; + BankedArray tagArray; + + int m_cache_size; + std::string m_policy; + int m_cache_num_sets; + int m_cache_num_set_bits; + int m_cache_assoc; + int m_start_index_bit; + bool m_resource_stalls; +}; + +std::ostream& operator<<(std::ostream& out, const CacheMemory& obj); + +#endif // __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__ diff --git a/src/mem/ruby/structures/DirectoryMemory.cc b/src/mem/ruby/structures/DirectoryMemory.cc new file mode 100644 index 000000000..db165460c --- /dev/null +++ b/src/mem/ruby/structures/DirectoryMemory.cc @@ -0,0 +1,212 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
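The public interface above is driven by SLICC-generated controllers; the usual miss path checks for room, evicts a victim when necessary, then allocates. A sketch under that assumption, where m_cache, line_addr and MyEntry are hypothetical names (MyEntry standing in for a protocol-specific AbstractCacheEntry subclass):

    // Sketch of the allocate-or-evict pattern a controller typically follows.
    if (!m_cache->cacheAvail(line_addr)) {
        Address victim = m_cache->cacheProbe(line_addr);  // conflicting line
        // ... protocol writes back / invalidates 'victim' here ...
        m_cache->deallocate(victim);
    }
    AbstractCacheEntry *e = m_cache->allocate(line_addr, new MyEntry());
    m_cache->setMRU(line_addr);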
+ */ + +#include "base/intmath.hh" +#include "debug/RubyCache.hh" +#include "debug/RubyStats.hh" +#include "mem/ruby/slicc_interface/RubySlicc_Util.hh" +#include "mem/ruby/structures/DirectoryMemory.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; + +int DirectoryMemory::m_num_directories = 0; +int DirectoryMemory::m_num_directories_bits = 0; +uint64_t DirectoryMemory::m_total_size_bytes = 0; +int DirectoryMemory::m_numa_high_bit = 0; + +DirectoryMemory::DirectoryMemory(const Params *p) + : SimObject(p) +{ + m_version = p->version; + m_size_bytes = p->size; + m_size_bits = floorLog2(m_size_bytes); + m_num_entries = 0; + m_use_map = p->use_map; + m_map_levels = p->map_levels; + m_numa_high_bit = p->numa_high_bit; +} + +void +DirectoryMemory::init() +{ + m_num_entries = m_size_bytes / RubySystem::getBlockSizeBytes(); + + if (m_use_map) { + m_sparseMemory = new SparseMemory(m_map_levels); + g_system_ptr->registerSparseMemory(m_sparseMemory); + } else { + m_entries = new AbstractEntry*[m_num_entries]; + for (int i = 0; i < m_num_entries; i++) + m_entries[i] = NULL; + m_ram = g_system_ptr->getMemoryVector(); + } + + m_num_directories++; + m_num_directories_bits = ceilLog2(m_num_directories); + m_total_size_bytes += m_size_bytes; + + if (m_numa_high_bit == 0) { + m_numa_high_bit = RubySystem::getMemorySizeBits() - 1; + } + assert(m_numa_high_bit != 0); +} + +DirectoryMemory::~DirectoryMemory() +{ + // free up all the directory entries + if (m_entries != NULL) { + for (uint64 i = 0; i < m_num_entries; i++) { + if (m_entries[i] != NULL) { + delete m_entries[i]; + } + } + delete [] m_entries; + } else if (m_use_map) { + delete m_sparseMemory; + } +} + +uint64 +DirectoryMemory::mapAddressToDirectoryVersion(PhysAddress address) +{ + if (m_num_directories_bits == 0) + return 0; + + uint64 ret = address.bitSelect(m_numa_high_bit - m_num_directories_bits + 1, + m_numa_high_bit); + return ret; +} + +bool +DirectoryMemory::isPresent(PhysAddress address) +{ + bool ret = (mapAddressToDirectoryVersion(address) == m_version); + return ret; +} + +uint64 +DirectoryMemory::mapAddressToLocalIdx(PhysAddress address) +{ + uint64 ret; + if (m_num_directories_bits > 0) { + ret = address.bitRemove(m_numa_high_bit - m_num_directories_bits + 1, + m_numa_high_bit); + } else { + ret = address.getAddress(); + } + + return ret >> (RubySystem::getBlockSizeBits()); +} + +AbstractEntry* +DirectoryMemory::lookup(PhysAddress address) +{ + assert(isPresent(address)); + DPRINTF(RubyCache, "Looking up address: %s\n", address); + + if (m_use_map) { + return m_sparseMemory->lookup(address); + } else { + uint64_t idx = mapAddressToLocalIdx(address); + assert(idx < m_num_entries); + return m_entries[idx]; + } +} + +AbstractEntry* +DirectoryMemory::allocate(const PhysAddress& address, AbstractEntry* entry) +{ + assert(isPresent(address)); + uint64 idx; + DPRINTF(RubyCache, "Looking up address: %s\n", address); + + if (m_use_map) { + m_sparseMemory->add(address, entry); + entry->changePermission(AccessPermission_Read_Write); + } else { + idx = mapAddressToLocalIdx(address); + assert(idx < m_num_entries); + entry->getDataBlk().assign(m_ram->getBlockPtr(address)); + entry->changePermission(AccessPermission_Read_Only); + m_entries[idx] = entry; + } + + return entry; +} + +void +DirectoryMemory::invalidateBlock(PhysAddress address) +{ + if (m_use_map) { + assert(m_sparseMemory->exist(address)); + m_sparseMemory->remove(address); + } +#if 0 + else { + assert(isPresent(address)); + + Index index = address.memoryModuleIndex(); 
+ + if (index < 0 || index > m_size) { + ERROR_MSG("Directory Memory Assertion: " + "accessing memory out of range."); + } + + if (m_entries[index] != NULL){ + delete m_entries[index]; + m_entries[index] = NULL; + } + } +#endif +} + +void +DirectoryMemory::print(ostream& out) const +{ +} + +void +DirectoryMemory::regStats() +{ + if (m_use_map) { + m_sparseMemory->regStats(name()); + } +} + +void +DirectoryMemory::recordRequestType(DirectoryRequestType requestType) { + DPRINTF(RubyStats, "Recorded statistic: %s\n", + DirectoryRequestType_to_string(requestType)); +} + +DirectoryMemory * +RubyDirectoryMemoryParams::create() +{ + return new DirectoryMemory(this); +} diff --git a/src/mem/ruby/structures/DirectoryMemory.hh b/src/mem/ruby/structures/DirectoryMemory.hh new file mode 100644 index 000000000..cc390e428 --- /dev/null +++ b/src/mem/ruby/structures/DirectoryMemory.hh @@ -0,0 +1,104 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
+#define __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
+
+#include <iostream>
+#include <string>
+
+#include "mem/protocol/DirectoryRequestType.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/slicc_interface/AbstractEntry.hh"
+#include "mem/ruby/structures/MemoryVector.hh"
+#include "mem/ruby/structures/SparseMemory.hh"
+#include "params/RubyDirectoryMemory.hh"
+#include "sim/sim_object.hh"
+
+class DirectoryMemory : public SimObject
+{
+  public:
+    typedef RubyDirectoryMemoryParams Params;
+    DirectoryMemory(const Params *p);
+    ~DirectoryMemory();
+
+    void init();
+
+    uint64 mapAddressToLocalIdx(PhysAddress address);
+    static uint64 mapAddressToDirectoryVersion(PhysAddress address);
+
+    bool isSparseImplementation() { return m_use_map; }
+    uint64 getSize() { return m_size_bytes; }
+
+    bool isPresent(PhysAddress address);
+    AbstractEntry* lookup(PhysAddress address);
+    AbstractEntry* allocate(const PhysAddress& address,
+                            AbstractEntry* new_entry);
+
+    void invalidateBlock(PhysAddress address);
+
+    void print(std::ostream& out) const;
+    void regStats();
+
+    void recordRequestType(DirectoryRequestType requestType);
+
+  private:
+    // Private copy constructor and assignment operator
+    DirectoryMemory(const DirectoryMemory& obj);
+    DirectoryMemory& operator=(const DirectoryMemory& obj);
+
+  private:
+    const std::string m_name;
+    AbstractEntry **m_entries;
+    // int m_size;  // # of memory module blocks this directory is
+    //              // responsible for
+    uint64 m_size_bytes;
+    uint64 m_size_bits;
+    uint64 m_num_entries;
+    int m_version;
+
+    static int m_num_directories;
+    static int m_num_directories_bits;
+    static uint64_t m_total_size_bytes;
+    static int m_numa_high_bit;
+
+    MemoryVector* m_ram;
+    SparseMemory* m_sparseMemory;
+    bool m_use_map;
+    int m_map_levels;
+};
+
+inline std::ostream&
+operator<<(std::ostream& out, const DirectoryMemory& obj)
+{
+    obj.print(out);
+    out << std::flush;
+    return out;
+}
+
+#endif // __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
diff --git a/src/mem/ruby/structures/DirectoryMemory.py b/src/mem/ruby/structures/DirectoryMemory.py
new file mode 100644
index 000000000..c64439ce5
--- /dev/null
+++ b/src/mem/ruby/structures/DirectoryMemory.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Authors: Steve Reinhardt +# Brad Beckmann + +from m5.params import * +from m5.proxy import * +from m5.SimObject import SimObject + +class RubyDirectoryMemory(SimObject): + type = 'RubyDirectoryMemory' + cxx_class = 'DirectoryMemory' + cxx_header = "mem/ruby/structures/DirectoryMemory.hh" + version = Param.Int(0, "") + size = Param.MemorySize("1GB", "capacity in bytes") + use_map = Param.Bool(False, "enable sparse memory") + map_levels = Param.Int(4, "sparse memory map levels") + # the default value of the numa high bit is specified in the command line + # option and must be passed into the directory memory sim object + numa_high_bit = Param.Int("numa high bit") diff --git a/src/mem/ruby/structures/LRUPolicy.hh b/src/mem/ruby/structures/LRUPolicy.hh new file mode 100644 index 000000000..bb61b9d50 --- /dev/null +++ b/src/mem/ruby/structures/LRUPolicy.hh @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2007 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __MEM_RUBY_SYSTEM_LRUPOLICY_HH__ +#define __MEM_RUBY_SYSTEM_LRUPOLICY_HH__ + +#include "mem/ruby/structures/AbstractReplacementPolicy.hh" + +/* Simple true LRU replacement policy */ + +class LRUPolicy : public AbstractReplacementPolicy +{ + public: + LRUPolicy(Index num_sets, Index assoc); + ~LRUPolicy(); + + void touch(Index set, Index way, Tick time); + Index getVictim(Index set) const; +}; + +inline +LRUPolicy::LRUPolicy(Index num_sets, Index assoc) + : AbstractReplacementPolicy(num_sets, assoc) +{ +} + +inline +LRUPolicy::~LRUPolicy() +{ +} + +inline void +LRUPolicy::touch(Index set, Index index, Tick time) +{ + assert(index >= 0 && index < m_assoc); + assert(set >= 0 && set < m_num_sets); + + m_last_ref_ptr[set][index] = time; +} + +inline Index +LRUPolicy::getVictim(Index set) const +{ + // assert(m_assoc != 0); + Tick time, smallest_time; + Index smallest_index; + + smallest_index = 0; + smallest_time = m_last_ref_ptr[set][0]; + + for (unsigned i = 0; i < m_assoc; i++) { + time = m_last_ref_ptr[set][i]; + // assert(m_cache[cacheSet][i].m_Permission != + // AccessPermission_NotPresent); + + if (time < smallest_time) { + smallest_index = i; + smallest_time = time; + } + } + + // DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet); + // DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index); + // DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]); + // DEBUG_EXPR(CACHE_COMP, MedPrio, *this); + + return smallest_index; +} + +#endif // __MEM_RUBY_SYSTEM_LRUPOLICY_HH__ diff --git a/src/mem/ruby/structures/MemoryControl.cc b/src/mem/ruby/structures/MemoryControl.cc new file mode 100644 index 000000000..6c933b4d4 --- /dev/null +++ b/src/mem/ruby/structures/MemoryControl.cc @@ -0,0 +1,49 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * Copyright (c) 2012 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "debug/RubyStats.hh" +#include "mem/ruby/common/Global.hh" +#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh" +#include "mem/ruby/structures/MemoryControl.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; +MemoryControl::MemoryControl(const Params *p) + : ClockedObject(p), Consumer(this), m_event(this) +{ + g_system_ptr->registerMemController(this); +} + +MemoryControl::~MemoryControl() {}; + +void +MemoryControl::recordRequestType(MemoryControlRequestType request) { + DPRINTF(RubyStats, "Recorded request: %s\n", + MemoryControlRequestType_to_string(request)); +} diff --git a/src/mem/ruby/structures/MemoryControl.hh b/src/mem/ruby/structures/MemoryControl.hh new file mode 100644 index 000000000..7285e0021 --- /dev/null +++ b/src/mem/ruby/structures/MemoryControl.hh @@ -0,0 +1,114 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * Copyright (c) 2012 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__
+#define __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__
+
+#include <iostream>
+#include <list>
+#include <string>
+
+#include "mem/protocol/MemoryControlRequestType.hh"
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/slicc_interface/Message.hh"
+#include "mem/ruby/structures/MemoryNode.hh"
+#include "params/MemoryControl.hh"
+#include "sim/clocked_object.hh"
+
+//////////////////////////////////////////////////////////////////////////////
+
+class MemoryControl : public ClockedObject, public Consumer
+{
+  public:
+    typedef MemoryControlParams Params;
+    const Params *params() const
+    { return dynamic_cast<const Params *>(_params); }
+
+    MemoryControl(const Params *p);
+    virtual void init() = 0;
+    virtual void reset() = 0;
+
+    ~MemoryControl();
+
+    virtual void wakeup() = 0;
+
+    virtual void setConsumer(Consumer* consumer_ptr) = 0;
+    virtual Consumer* getConsumer() = 0;
+    virtual void setClockObj(ClockedObject* consumer_ptr) {}
+
+    virtual void setDescription(const std::string& name) = 0;
+    virtual std::string getDescription() = 0;
+
+    // Called from the directory:
+    virtual void enqueue(const MsgPtr& message, Cycles latency) = 0;
+    virtual void enqueueMemRef(MemoryNode *memRef) = 0;
+    virtual void dequeue() = 0;
+    virtual const Message* peek() = 0;
+    virtual MemoryNode *peekNode() = 0;
+    virtual bool isReady() = 0;
+    virtual bool areNSlotsAvailable(int n) = 0; // infinite queue length
+
+    virtual void print(std::ostream& out) const = 0;
+    virtual void regStats() {};
+
+    virtual const int getChannel(const physical_address_t addr) const = 0;
+    virtual const int getBank(const physical_address_t addr) const = 0;
+    virtual const int getRank(const physical_address_t addr) const = 0;
+    virtual const int getRow(const physical_address_t addr) const = 0;
+
+    //added by SS
+    virtual int getBanksPerRank() = 0;
+    virtual int getRanksPerDimm() = 0;
+    virtual int getDimmsPerChannel() = 0;
+
+    virtual void recordRequestType(MemoryControlRequestType requestType);
+
+    virtual bool functionalReadBuffers(Packet *pkt)
+    { fatal("Functional read access not implemented!");}
+    virtual uint32_t functionalWriteBuffers(Packet *pkt)
+    { fatal("Functional write access not implemented!");}
+
+protected:
+    class MemCntrlEvent : public Event
+    {
+      public:
+        MemCntrlEvent(MemoryControl* _mem_cntrl)
+        {
+            mem_cntrl = _mem_cntrl;
+        }
+      private:
+        void process() { mem_cntrl->wakeup(); }
+
+        MemoryControl* mem_cntrl;
+    };
+
+    MemCntrlEvent m_event;
+};
+
+#endif // __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__
diff --git a/src/mem/ruby/structures/MemoryControl.py b/src/mem/ruby/structures/MemoryControl.py
new file mode 100644
index 000000000..8a6879cb9
--- /dev/null
+++ b/src/mem/ruby/structures/MemoryControl.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Authors: Steve Reinhardt +# Brad Beckmann + +from m5.params import * +from ClockedObject import ClockedObject + +class MemoryControl(ClockedObject): + abstract = True + type = 'MemoryControl' + cxx_class = 'MemoryControl' + cxx_header = "mem/ruby/structures/MemoryControl.hh" + version = Param.Int(""); + ruby_system = Param.RubySystem("") diff --git a/src/mem/ruby/structures/MemoryNode.cc b/src/mem/ruby/structures/MemoryNode.cc new file mode 100644 index 000000000..2a5cbb189 --- /dev/null +++ b/src/mem/ruby/structures/MemoryNode.cc @@ -0,0 +1,41 @@ +/* + * Copyright (c) 1999 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "mem/ruby/structures/MemoryNode.hh" + +using namespace std; + +void +MemoryNode::print(ostream& out) const +{ + out << "["; + out << m_time << ", "; + out << m_msg_counter << ", "; + out << m_msgptr << "; "; + out << "]"; +} diff --git a/src/mem/ruby/structures/MemoryNode.hh b/src/mem/ruby/structures/MemoryNode.hh new file mode 100644 index 000000000..f215ab649 --- /dev/null +++ b/src/mem/ruby/structures/MemoryNode.hh @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2008 Mark D. Hill and David A. Wood + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Description: + * This structure records everything known about a single + * memory request that is queued in the memory controller. + * It is created when the memory request first arrives + * at a memory controller and is deleted when the underlying + * message is enqueued to be sent back to the directory. + */ + +#ifndef __MEM_RUBY_SYSTEM_MEMORYNODE_HH__ +#define __MEM_RUBY_SYSTEM_MEMORYNODE_HH__ + +#include + +#include "mem/ruby/common/TypeDefines.hh" +#include "mem/ruby/slicc_interface/Message.hh" + +class MemoryNode +{ + public: + // old constructor + MemoryNode(const Cycles& time, int counter, const MsgPtr& msgptr, + const physical_address_t addr, const bool is_mem_read) + : m_time(time) + { + m_msg_counter = counter; + m_msgptr = msgptr; + m_addr = addr; + m_is_mem_read = is_mem_read; + m_is_dirty_wb = !is_mem_read; + } + + // new constructor + MemoryNode(const Cycles& time, const MsgPtr& msgptr, + const physical_address_t addr, const bool is_mem_read, + const bool is_dirty_wb) + : m_time(time) + { + m_msg_counter = 0; + m_msgptr = msgptr; + m_addr = addr; + m_is_mem_read = is_mem_read; + m_is_dirty_wb = is_dirty_wb; + } + + void print(std::ostream& out) const; + + Cycles m_time; + int m_msg_counter; + MsgPtr m_msgptr; + physical_address_t m_addr; + bool m_is_mem_read; + bool m_is_dirty_wb; +}; + +inline std::ostream& +operator<<(std::ostream& out, const MemoryNode& obj) +{ + obj.print(out); + out << std::flush; + return out; +} + +#endif // __MEM_RUBY_SYSTEM_MEMORYNODE_HH__ diff --git a/src/mem/ruby/structures/MemoryVector.hh b/src/mem/ruby/structures/MemoryVector.hh new file mode 100644 index 000000000..f2488b591 --- /dev/null +++ b/src/mem/ruby/structures/MemoryVector.hh @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2009 Mark D. Hill and David A. Wood + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__ +#define __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__ + +#include "base/trace.hh" +#include "debug/RubyCacheTrace.hh" +#include "mem/ruby/common/Address.hh" + +class DirectoryMemory; + +/** + * MemoryVector holds memory data (DRAM only) + */ +class MemoryVector +{ + public: + MemoryVector(); + MemoryVector(uint64 size); + ~MemoryVector(); + friend class DirectoryMemory; + + void resize(uint64 size); // destructive + + void write(const Address & paddr, uint8_t *data, int len); + uint8_t *read(const Address & paddr, uint8_t *data, int len); + uint32_t collatePages(uint8_t *&raw_data); + void populatePages(uint8_t *raw_data); + + private: + uint8_t *getBlockPtr(const PhysAddress & addr); + + uint64 m_size; + uint8_t **m_pages; + uint32_t m_num_pages; + const uint32_t m_page_offset_mask; + static const uint32_t PAGE_SIZE = 4096; +}; + +inline +MemoryVector::MemoryVector() + : m_page_offset_mask(4095) +{ + m_size = 0; + m_num_pages = 0; + m_pages = NULL; +} + +inline +MemoryVector::MemoryVector(uint64 size) + : m_page_offset_mask(4095) +{ + resize(size); +} + +inline +MemoryVector::~MemoryVector() +{ + for (int i = 0; i < m_num_pages; i++) { + if (m_pages[i] != 0) { + delete [] m_pages[i]; + } + } + delete [] m_pages; +} + +inline void +MemoryVector::resize(uint64 size) +{ + if (m_pages != NULL){ + for (int i = 0; i < m_num_pages; i++) { + if (m_pages[i] != 0) { + delete [] m_pages[i]; + } + } + delete [] m_pages; + } + m_size = size; + assert(size%PAGE_SIZE == 0); + m_num_pages = size >> 12; + m_pages = new uint8_t*[m_num_pages]; + memset(m_pages, 0, m_num_pages * sizeof(uint8_t*)); +} + +inline void +MemoryVector::write(const Address & paddr, uint8_t *data, int len) +{ + assert(paddr.getAddress() + len <= m_size); + uint32_t page_num = paddr.getAddress() >> 12; + if (m_pages[page_num] == 0) { + bool all_zeros = true; + for (int i = 0; i < len;i++) { + if (data[i] != 0) { + all_zeros = false; + break; + } + } + if (all_zeros) + return; + m_pages[page_num] = new 
uint8_t[PAGE_SIZE]; + memset(m_pages[page_num], 0, PAGE_SIZE); + uint32_t offset = paddr.getAddress() & m_page_offset_mask; + memcpy(&m_pages[page_num][offset], data, len); + } else { + memcpy(&m_pages[page_num][paddr.getAddress()&m_page_offset_mask], + data, len); + } +} + +inline uint8_t* +MemoryVector::read(const Address & paddr, uint8_t *data, int len) +{ + assert(paddr.getAddress() + len <= m_size); + uint32_t page_num = paddr.getAddress() >> 12; + if (m_pages[page_num] == 0) { + memset(data, 0, len); + } else { + memcpy(data, &m_pages[page_num][paddr.getAddress()&m_page_offset_mask], + len); + } + return data; +} + +inline uint8_t* +MemoryVector::getBlockPtr(const PhysAddress & paddr) +{ + uint32_t page_num = paddr.getAddress() >> 12; + if (m_pages[page_num] == 0) { + m_pages[page_num] = new uint8_t[PAGE_SIZE]; + memset(m_pages[page_num], 0, PAGE_SIZE); + } + return &m_pages[page_num][paddr.getAddress()&m_page_offset_mask]; +} + +/*! + * Function for collating all the pages of the physical memory together. + * In case a pointer for a page is NULL, this page needs only a single byte + * to represent that the pointer is NULL. Otherwise, it needs 1 + PAGE_SIZE + * bytes. The first represents that the page pointer is not NULL, and rest of + * the bytes represent the data on the page. + */ + +inline uint32_t +MemoryVector::collatePages(uint8_t *&raw_data) +{ + uint32_t num_zero_pages = 0; + uint32_t data_size = 0; + + for (uint32_t i = 0;i < m_num_pages; ++i) + { + if (m_pages[i] == 0) num_zero_pages++; + } + + raw_data = new uint8_t[sizeof(uint32_t) /* number of pages*/ + + m_num_pages /* whether the page is all zeros */ + + PAGE_SIZE * (m_num_pages - num_zero_pages)]; + + /* Write the number of pages to be stored. */ + memcpy(raw_data, &m_num_pages, sizeof(uint32_t)); + data_size = sizeof(uint32_t); + + DPRINTF(RubyCacheTrace, "collating %d pages\n", m_num_pages); + + for (uint32_t i = 0;i < m_num_pages; ++i) + { + if (m_pages[i] == 0) { + raw_data[data_size] = 0; + } else { + raw_data[data_size] = 1; + memcpy(raw_data + data_size + 1, m_pages[i], PAGE_SIZE); + data_size += PAGE_SIZE; + } + data_size += 1; + } + + return data_size; +} + +/*! + * Function for populating the pages of the memory using the available raw + * data. Each page has a byte associate with it, which represents whether the + * page was NULL or not, when all the pages were collated. The function assumes + * that the number of pages in the memory are same as those that were recorded + * in the checkpoint. + */ +inline void +MemoryVector::populatePages(uint8_t *raw_data) +{ + uint32_t data_size = 0; + uint32_t num_pages = 0; + + /* Read the number of pages that were stored. */ + memcpy(&num_pages, raw_data, sizeof(uint32_t)); + data_size = sizeof(uint32_t); + assert(num_pages == m_num_pages); + + DPRINTF(RubyCacheTrace, "Populating %d pages\n", num_pages); + + for (uint32_t i = 0;i < m_num_pages; ++i) + { + assert(m_pages[i] == 0); + if (raw_data[data_size] != 0) { + m_pages[i] = new uint8_t[PAGE_SIZE]; + memcpy(m_pages[i], raw_data + data_size + 1, PAGE_SIZE); + data_size += PAGE_SIZE; + } + data_size += 1; + } +} + +#endif // __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__ diff --git a/src/mem/ruby/structures/PerfectCacheMemory.hh b/src/mem/ruby/structures/PerfectCacheMemory.hh new file mode 100644 index 000000000..b56543c41 --- /dev/null +++ b/src/mem/ruby/structures/PerfectCacheMemory.hh @@ -0,0 +1,192 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
+#define __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
+
+#include "base/hashmap.hh"
+#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/common/Address.hh"
+
+template<class ENTRY>
+struct PerfectCacheLineState
+{
+    PerfectCacheLineState() { m_permission = AccessPermission_NUM; }
+    AccessPermission m_permission;
+    ENTRY m_entry;
+};
+
+template<class ENTRY>
+inline std::ostream&
+operator<<(std::ostream& out, const PerfectCacheLineState<ENTRY>& obj)
+{
+    return out;
+}
+
+template<class ENTRY>
+class PerfectCacheMemory
+{
+  public:
+    PerfectCacheMemory();
+
+    // tests to see if an address is present in the cache
+    bool isTagPresent(const Address& address) const;
+
+    // Returns true if there is:
+    // a) a tag match on this address or there is
+    // b) an Invalid line in the same cache "way"
+    bool cacheAvail(const Address& address) const;
+
+    // find an Invalid entry and sets the tag appropriate for the address
+    void allocate(const Address& address);
+
+    void deallocate(const Address& address);
+
+    // Returns with the physical address of the conflicting cache line
+    Address cacheProbe(const Address& newAddress) const;
+
+    // looks an address up in the cache
+    ENTRY& lookup(const Address& address);
+    const ENTRY& lookup(const Address& address) const;
+
+    // Get/Set permission of cache block
+    AccessPermission getPermission(const Address& address) const;
+    void changePermission(const Address& address, AccessPermission new_perm);
+
+    // Print cache contents
+    void print(std::ostream& out) const;
+
+  private:
+    // Private copy constructor and assignment operator
+    PerfectCacheMemory(const PerfectCacheMemory& obj);
+    PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);
+
+    // Data Members (m_prefix)
+    m5::hash_map<Address, PerfectCacheLineState<ENTRY> > m_map;
+};
+
+template<class ENTRY>
+inline std::ostream&
+operator<<(std::ostream& out, const PerfectCacheMemory<ENTRY>& obj)
+{
+    obj.print(out);
+    out << std::flush;
+    return out;
+}
+
+template<class ENTRY>
+inline
+PerfectCacheMemory<ENTRY>::PerfectCacheMemory()
+{
+}
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+inline bool
+PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
+{
+    return m_map.count(line_address(address)) > 0;
+}
+
+template<class ENTRY>
+inline bool
+PerfectCacheMemory<ENTRY>::cacheAvail(const Address& address) const
+{
+    return true;
+}
+
+// find an Invalid or already allocated entry and sets the tag
+// appropriate for the address
+template<class ENTRY>
+inline void
+PerfectCacheMemory<ENTRY>::allocate(const Address& address)
+{
+    PerfectCacheLineState<ENTRY> line_state;
+    line_state.m_permission = AccessPermission_Invalid;
+    line_state.m_entry = ENTRY();
+    m_map[line_address(address)] = line_state;
+}
+
+// deallocate entry
+template<class ENTRY>
+inline void
+PerfectCacheMemory<ENTRY>::deallocate(const Address& address)
+{
+    m_map.erase(line_address(address));
+}
+
+// Returns with the physical address of the conflicting cache line
+template<class ENTRY>
+inline Address
+PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const
+{
+    panic("cacheProbe called in perfect cache");
+    return newAddress;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline ENTRY&
+PerfectCacheMemory<ENTRY>::lookup(const Address& address)
+{
+    return m_map[line_address(address)].m_entry;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline const ENTRY&
+PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
+{
+    return m_map[line_address(address)].m_entry;
+}
+
+template<class ENTRY>
+inline AccessPermission
+PerfectCacheMemory<ENTRY>::getPermission(const Address& address) const
+{
+    return m_map[line_address(address)].m_permission;
+}
+
+template<class ENTRY>
+inline void
+PerfectCacheMemory<ENTRY>::changePermission(const Address& address,
+                                            AccessPermission new_perm)
+{
+    Address line_address = address;
+    line_address.makeLineAddress();
+    PerfectCacheLineState<ENTRY>& line_state = m_map[line_address];
+    line_state.m_permission = new_perm;
+}
+
+template<class ENTRY>
+inline void
+PerfectCacheMemory<ENTRY>::print(std::ostream& out) const
+{
+}
+
+#endif // __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
diff --git a/src/mem/ruby/structures/PersistentTable.cc b/src/mem/ruby/structures/PersistentTable.cc
new file mode 100644
index 000000000..57b06946e
--- /dev/null
+++ b/src/mem/ruby/structures/PersistentTable.cc
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "mem/ruby/structures/PersistentTable.hh" + +using namespace std; + +// randomize so that handoffs are not locality-aware +#if 0 +int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, + 10, 14, 3, 7, 11, 15}; +int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15}; +#endif + +PersistentTable::PersistentTable() +{ +} + +PersistentTable::~PersistentTable() +{ +} + +void +PersistentTable::persistentRequestLock(const Address& address, + MachineID locker, + AccessType type) +{ +#if 0 + if (locker == m_chip_ptr->getID()) + cout << "Chip " << m_chip_ptr->getID() << ": " << llocker + << " requesting lock for " << address << endl; + + MachineID locker = (MachineID) persistent_randomize[llocker]; +#endif + + assert(address == line_address(address)); + + static const PersistentTableEntry dflt; + pair r = + m_map.insert(AddressMap::value_type(address, dflt)); + bool present = !r.second; + AddressMap::iterator i = r.first; + PersistentTableEntry &entry = i->second; + + if (present) { + // Make sure we're not already in the locked set + assert(!(entry.m_starving.isElement(locker))); + } + + entry.m_starving.add(locker); + if (type == AccessType_Write) + entry.m_request_to_write.add(locker); + + if (present) + assert(entry.m_marked.isSubset(entry.m_starving)); +} + +void +PersistentTable::persistentRequestUnlock(const Address& address, + MachineID unlocker) +{ +#if 0 + if (unlocker == m_chip_ptr->getID()) + cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker + << " requesting unlock for " << address << endl; + + MachineID unlocker = (MachineID) persistent_randomize[uunlocker]; +#endif + + assert(address == line_address(address)); + assert(m_map.count(address)); + PersistentTableEntry& entry = m_map[address]; + + // + // Make sure we're in the locked set + // + assert(entry.m_starving.isElement(unlocker)); + assert(entry.m_marked.isSubset(entry.m_starving)); + entry.m_starving.remove(unlocker); + entry.m_marked.remove(unlocker); + entry.m_request_to_write.remove(unlocker); + assert(entry.m_marked.isSubset(entry.m_starving)); + + // Deallocate if empty + if (entry.m_starving.isEmpty()) { + assert(entry.m_marked.isEmpty()); + m_map.erase(address); + } +} + +bool +PersistentTable::okToIssueStarving(const Address& address, + MachineID machId) const +{ + assert(address == line_address(address)); + + AddressMap::const_iterator i = m_map.find(address); + if (i == m_map.end()) { + // No entry present + return true; + } + + const PersistentTableEntry &entry = i->second; + + if (entry.m_starving.isElement(machId)) { + // We can't issue another lockdown until are previous unlock + // has occurred + return false; + } + + return entry.m_marked.isEmpty(); +} + +MachineID +PersistentTable::findSmallest(const Address& address) const +{ + assert(address == line_address(address)); + AddressMap::const_iterator i = m_map.find(address); + assert(i != m_map.end()); + const PersistentTableEntry& entry = i->second; + return 
entry.m_starving.smallestElement(); +} + +AccessType +PersistentTable::typeOfSmallest(const Address& address) const +{ + assert(address == line_address(address)); + AddressMap::const_iterator i = m_map.find(address); + assert(i != m_map.end()); + const PersistentTableEntry& entry = i->second; + if (entry.m_request_to_write. + isElement(entry.m_starving.smallestElement())) { + return AccessType_Write; + } else { + return AccessType_Read; + } +} + +void +PersistentTable::markEntries(const Address& address) +{ + assert(address == line_address(address)); + AddressMap::iterator i = m_map.find(address); + if (i == m_map.end()) + return; + + PersistentTableEntry& entry = i->second; + + // None should be marked + assert(entry.m_marked.isEmpty()); + + // Mark all the nodes currently in the table + entry.m_marked = entry.m_starving; +} + +bool +PersistentTable::isLocked(const Address& address) const +{ + assert(address == line_address(address)); + + // If an entry is present, it must be locked + return m_map.count(address) > 0; +} + +int +PersistentTable::countStarvingForAddress(const Address& address) const +{ + assert(address == line_address(address)); + AddressMap::const_iterator i = m_map.find(address); + if (i == m_map.end()) + return 0; + + const PersistentTableEntry& entry = i->second; + return entry.m_starving.count(); +} + +int +PersistentTable::countReadStarvingForAddress(const Address& address) const +{ + assert(address == line_address(address)); + AddressMap::const_iterator i = m_map.find(address); + if (i == m_map.end()) + return 0; + + const PersistentTableEntry& entry = i->second; + return entry.m_starving.count() - entry.m_request_to_write.count(); +} + +void +PersistentTable::print(ostream& out) const +{ +} + diff --git a/src/mem/ruby/structures/PersistentTable.hh b/src/mem/ruby/structures/PersistentTable.hh new file mode 100644 index 000000000..b023987a4 --- /dev/null +++ b/src/mem/ruby/structures/PersistentTable.hh @@ -0,0 +1,100 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__ +#define __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__ + +#include + +#include "base/hashmap.hh" +#include "mem/protocol/AccessType.hh" +#include "mem/ruby/common/Address.hh" +#include "mem/ruby/common/MachineID.hh" +#include "mem/ruby/common/NetDest.hh" + +class PersistentTableEntry +{ + public: + PersistentTableEntry() {} + void print(std::ostream& out) const {} + + NetDest m_starving; + NetDest m_marked; + NetDest m_request_to_write; +}; + +class PersistentTable +{ + public: + // Constructors + PersistentTable(); + + // Destructor + ~PersistentTable(); + + // Public Methods + void persistentRequestLock(const Address& address, MachineID locker, + AccessType type); + void persistentRequestUnlock(const Address& address, MachineID unlocker); + bool okToIssueStarving(const Address& address, MachineID machID) const; + MachineID findSmallest(const Address& address) const; + AccessType typeOfSmallest(const Address& address) const; + void markEntries(const Address& address); + bool isLocked(const Address& addr) const; + int countStarvingForAddress(const Address& addr) const; + int countReadStarvingForAddress(const Address& addr) const; + + void print(std::ostream& out) const; + + private: + // Private copy constructor and assignment operator + PersistentTable(const PersistentTable& obj); + PersistentTable& operator=(const PersistentTable& obj); + + // Data Members (m_prefix) + typedef m5::hash_map AddressMap; + AddressMap m_map; +}; + +inline std::ostream& +operator<<(std::ostream& out, const PersistentTable& obj) +{ + obj.print(out); + out << std::flush; + return out; +} + +inline std::ostream& +operator<<(std::ostream& out, const PersistentTableEntry& obj) +{ + obj.print(out); + out << std::flush; + return out; +} + +#endif // __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__ diff --git a/src/mem/ruby/structures/Prefetcher.hh b/src/mem/ruby/structures/Prefetcher.hh index 967d96086..2bc7d812e 100644 --- a/src/mem/ruby/structures/Prefetcher.hh +++ b/src/mem/ruby/structures/Prefetcher.hh @@ -34,8 +34,8 @@ #include #include "base/statistics.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" #include "mem/ruby/common/Address.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/slicc_interface/AbstractController.hh" #include "mem/ruby/slicc_interface/RubyRequest.hh" #include "mem/ruby/system/System.hh" diff --git a/src/mem/ruby/structures/PseudoLRUPolicy.hh b/src/mem/ruby/structures/PseudoLRUPolicy.hh new file mode 100644 index 000000000..e464bbeac --- /dev/null +++ b/src/mem/ruby/structures/PseudoLRUPolicy.hh @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2007 Mark D. Hill and David A. Wood + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
+#define __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
+
+#include "mem/ruby/structures/AbstractReplacementPolicy.hh"
+
+/**
+ * Implementation of tree-based pseudo-LRU replacement
+ *
+ * Works for any associativity between 1 and 128.
+ *
+ * Also implements associativities that are not a power of 2 by
+ * ignoring paths that lead to a larger index (i.e. truncating the
+ * tree). Note that when this occurs, the algorithm becomes less
+ * fair, as it will favor indices in the larger (by index) half of
+ * the associative set. This is most unfair when the nearest power of
+ * 2 is one below the associativity, and most fair when it is one above.
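+ *
+ * For example (using the touch()/getVictim() encoding defined below, and
+ * assuming a simple assoc == 4 cache, i.e. two tree levels and node bits
+ * 0, 1 and 2 per set), a possible sequence is:
+ *
+ *     getVictim(s);      // all-zero tree: walk toward higher ways -> way 3
+ *     touch(s, 3, t);    // sets the root bit and its right child bit
+ *     getVictim(s);      // root bit now 1 -> untouched half -> way 1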
+ */ + +class PseudoLRUPolicy : public AbstractReplacementPolicy +{ + public: + PseudoLRUPolicy(Index num_sets, Index assoc); + ~PseudoLRUPolicy(); + + void touch(Index set, Index way, Tick time); + Index getVictim(Index set) const; + + private: + unsigned int m_effective_assoc; /** nearest (to ceiling) power of 2 */ + unsigned int m_num_levels; /** number of levels in the tree */ + uint64* m_trees; /** bit representation of the + * trees, one for each set */ +}; + +inline +PseudoLRUPolicy::PseudoLRUPolicy(Index num_sets, Index assoc) + : AbstractReplacementPolicy(num_sets, assoc) +{ + // associativity cannot exceed capacity of tree representation + assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4); + + m_trees = NULL; + m_num_levels = 0; + + m_effective_assoc = 1; + while (m_effective_assoc < assoc) { + // effective associativity is ceiling power of 2 + m_effective_assoc <<= 1; + } + assoc = m_effective_assoc; + while (true) { + assoc /= 2; + if(!assoc) break; + m_num_levels++; + } + assert(m_num_levels < sizeof(unsigned int)*4); + m_trees = new uint64[m_num_sets]; + for (unsigned i = 0; i < m_num_sets; i++) { + m_trees[i] = 0; + } +} + +inline +PseudoLRUPolicy::~PseudoLRUPolicy() +{ + if (m_trees != NULL) + delete[] m_trees; +} + +inline void +PseudoLRUPolicy::touch(Index set, Index index, Tick time) +{ + assert(index >= 0 && index < m_assoc); + assert(set >= 0 && set < m_num_sets); + + int tree_index = 0; + int node_val; + for (int i = m_num_levels - 1; i >= 0; i--) { + node_val = (index >> i)&1; + if (node_val) + m_trees[set] |= node_val << tree_index; + else + m_trees[set] &= ~(1 << tree_index); + tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1; + } + m_last_ref_ptr[set][index] = time; +} + +inline Index +PseudoLRUPolicy::getVictim(Index set) const +{ + // assert(m_assoc != 0); + Index index = 0; + + int tree_index = 0; + int node_val; + for (unsigned i = 0; i < m_num_levels; i++){ + node_val = (m_trees[set] >> tree_index) & 1; + index += node_val ? 0 : (m_effective_assoc >> (i + 1)); + tree_index = node_val ? (tree_index * 2) + 1 : (tree_index * 2) + 2; + } + assert(index >= 0 && index < m_effective_assoc); + + /* return either the found index or the max possible index */ + /* NOTE: this is not a fair replacement when assoc is not a power of 2 */ + return (index > (m_assoc - 1)) ? m_assoc - 1 : index; +} + +#endif // __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__ diff --git a/src/mem/ruby/structures/RubyMemoryControl.cc b/src/mem/ruby/structures/RubyMemoryControl.cc new file mode 100644 index 000000000..bc01c7f94 --- /dev/null +++ b/src/mem/ruby/structures/RubyMemoryControl.cc @@ -0,0 +1,791 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * Copyright (c) 2012 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Description: This module simulates a basic DDR-style memory controller + * (and can easily be extended to do FB-DIMM as well). + * + * This module models a single channel, connected to any number of + * DIMMs with any number of ranks of DRAMs each. If you want multiple + * address/data channels, you need to instantiate multiple copies of + * this module. + * + * Each memory request is placed in a queue associated with a specific + * memory bank. This queue is of finite size; if the queue is full + * the request will back up in an (infinite) common queue and will + * effectively throttle the whole system. This sort of behavior is + * intended to be closer to real system behavior than if we had an + * infinite queue on each bank. If you want the latter, just make + * the bank queues unreasonably large. + * + * The head item on a bank queue is issued when all of the + * following are true: + * the bank is available + * the address path to the DIMM is available + * the data path to or from the DIMM is available + * + * Note that we are not concerned about fixed offsets in time. The bank + * will not be used at the same moment as the address path, but since + * there is no queue in the DIMM or the DRAM it will be used at a constant + * number of cycles later, so it is treated as if it is used at the same + * time. + * + * We are assuming closed bank policy; that is, we automatically close + * each bank after a single read or write. Adding an option for open + * bank policy is for future work. + * + * We are assuming "posted CAS"; that is, we send the READ or WRITE + * immediately after the ACTIVATE. This makes scheduling the address + * bus trivial; we always schedule a fixed set of cycles. For DDR-400, + * this is a set of two cycles; for some configurations such as + * DDR-800 the parameter tRRD forces this to be set to three cycles. + * + * We assume a four-bit-time transfer on the data wires. This is + * the minimum burst length for DDR-2. This would correspond + * to (for example) a memory where each DIMM is 72 bits wide + * and DIMMs are ganged in pairs to deliver 64 bytes at a shot. + * This gives us the same occupancy on the data wires as on the + * address wires (for the two-address-cycle case). + * + * The only non-trivial scheduling problem is the data wires. + * A write will use the wires earlier in the operation than a read + * will; typically one cycle earlier as seen at the DRAM, but earlier + * by a worst-case round-trip wire delay when seen at the memory controller. + * So, while reads from one rank can be scheduled back-to-back + * every two cycles, and writes (to any rank) scheduled every two cycles, + * when a read is followed by a write we need to insert a bubble. 
+ * Furthermore, consecutive reads from two different ranks may need + * to insert a bubble due to skew between when one DRAM stops driving the + * wires and when the other one starts. (These bubbles are parameters.) + * + * This means that when some number of reads and writes are at the + * heads of their queues, reads could starve writes, and/or reads + * to the same rank could starve out other requests, since the others + * would never see the data bus ready. + * For this reason, we have implemented an anti-starvation feature. + * A group of requests is marked "old", and a counter is incremented + * each cycle as long as any request from that batch has not issued. + * if the counter reaches twice the bank busy time, we hold off any + * newer requests until all of the "old" requests have issued. + * + * We also model tFAW. This is an obscure DRAM parameter that says + * that no more than four activate requests can happen within a window + * of a certain size. For most configurations this does not come into play, + * or has very little effect, but it could be used to throttle the power + * consumption of the DRAM. In this implementation (unlike in a DRAM + * data sheet) TFAW is measured in memory bus cycles; i.e. if TFAW = 16 + * then no more than four activates may happen within any 16 cycle window. + * Refreshes are included in the activates. + * + */ + +#include "base/cast.hh" +#include "base/cprintf.hh" +#include "debug/RubyMemory.hh" +#include "mem/ruby/common/Address.hh" +#include "mem/ruby/common/Global.hh" +#include "mem/ruby/profiler/Profiler.hh" +#include "mem/ruby/slicc_interface/NetworkMessage.hh" +#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh" +#include "mem/ruby/structures/RubyMemoryControl.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; + +// Value to reset watchdog timer to. +// If we're idle for this many memory control cycles, +// shut down our clock (our rescheduling of ourselves). +// Refresh shuts down as well. +// When we restart, we'll be in a different phase +// with respect to ruby cycles, so this introduces +// a slight inaccuracy. But it is necessary or the +// ruby tester never terminates because the event +// queue is never empty. 
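The tFAW bookkeeping described above boils down to a per-rank shift register plus a population count, shifted once per memory bus cycle. A self-contained sketch, assuming tFAW = 16 cycles and the four-activate limit mentioned in the comment (the real controller keeps one such register per rank and folds refreshes into the count):

    // tfaw_window.cc -- standalone illustration of the tFAW sliding window;
    // not the gem5 code.  Assumes tFAW = 16 bus cycles, 4 activates per window.
    #include <cstdint>
    #include <iostream>

    struct TfawWindow
    {
        static const int kTfaw = 16;       // window length in memory bus cycles
        static const int kMaxActivates = 4;
        uint64_t shift = 0;                // each set bit is an activate still in the window
        int count = 0;                     // how many activates are in the window

        bool canActivate() const { return count < kMaxActivates; }

        void markActivate()
        {
            shift |= uint64_t(1) << (kTfaw - 1);
            count++;
        }

        // Call once per memory bus cycle; an activate leaves the window when
        // its bit shifts past bit 0.
        void tick()
        {
            if (shift & 1)
                count--;
            shift >>= 1;
        }
    };

    int main()
    {
        TfawWindow rank;
        int issued = 0;
        for (int cycle = 0; cycle < 24; cycle++) {
            if (rank.canActivate()) {      // try to activate every cycle
                rank.markActivate();
                issued++;
                std::cout << "activate at cycle " << cycle << "\n";
            }
            rank.tick();
        }
        std::cout << "issued " << issued << " activates in 24 cycles\n";
        return 0;
    }

Run as written, it issues four activates, stalls for the rest of the 16-cycle window, and then issues four more, which is exactly the throttling behaviour the comment describes.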
+#define IDLECOUNT_MAX_VALUE 1000 + +// Output operator definition + +ostream& +operator<<(ostream& out, const RubyMemoryControl& obj) +{ + obj.print(out); + out << flush; + return out; +} + + +// **************************************************************** + +// CONSTRUCTOR +RubyMemoryControl::RubyMemoryControl(const Params *p) + : MemoryControl(p) +{ + m_banks_per_rank = p->banks_per_rank; + m_ranks_per_dimm = p->ranks_per_dimm; + m_dimms_per_channel = p->dimms_per_channel; + m_bank_bit_0 = p->bank_bit_0; + m_rank_bit_0 = p->rank_bit_0; + m_dimm_bit_0 = p->dimm_bit_0; + m_bank_queue_size = p->bank_queue_size; + m_bank_busy_time = p->bank_busy_time; + m_rank_rank_delay = p->rank_rank_delay; + m_read_write_delay = p->read_write_delay; + m_basic_bus_busy_time = p->basic_bus_busy_time; + m_mem_ctl_latency = p->mem_ctl_latency; + m_refresh_period = p->refresh_period; + m_tFaw = p->tFaw; + m_mem_random_arbitrate = p->mem_random_arbitrate; + m_mem_fixed_delay = p->mem_fixed_delay; + + m_profiler_ptr = new MemCntrlProfiler(name(), + m_banks_per_rank, + m_ranks_per_dimm, + m_dimms_per_channel); +} + +void +RubyMemoryControl::init() +{ + m_msg_counter = 0; + + assert(m_tFaw <= 62); // must fit in a uint64 shift register + + m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel; + m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel; + m_refresh_period_system = m_refresh_period / m_total_banks; + + m_bankQueues = new list [m_total_banks]; + assert(m_bankQueues); + + m_bankBusyCounter = new int [m_total_banks]; + assert(m_bankBusyCounter); + + m_oldRequest = new int [m_total_banks]; + assert(m_oldRequest); + + for (int i = 0; i < m_total_banks; i++) { + m_bankBusyCounter[i] = 0; + m_oldRequest[i] = 0; + } + + m_busBusyCounter_Basic = 0; + m_busBusyCounter_Write = 0; + m_busBusyCounter_ReadNewRank = 0; + m_busBusy_WhichRank = 0; + + m_roundRobin = 0; + m_refresh_count = 1; + m_need_refresh = 0; + m_refresh_bank = 0; + m_idleCount = 0; + m_ageCounter = 0; + + // Each tfaw shift register keeps a moving bit pattern + // which shows when recent activates have occurred. + // m_tfaw_count keeps track of how many 1 bits are set + // in each shift register. When m_tfaw_count is >= 4, + // new activates are not allowed. + m_tfaw_shift = new uint64[m_total_ranks]; + m_tfaw_count = new int[m_total_ranks]; + for (int i = 0; i < m_total_ranks; i++) { + m_tfaw_shift[i] = 0; + m_tfaw_count[i] = 0; + } +} + +void +RubyMemoryControl::reset() +{ + m_msg_counter = 0; + + assert(m_tFaw <= 62); // must fit in a uint64 shift register + + m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel; + m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel; + m_refresh_period_system = m_refresh_period / m_total_banks; + + assert(m_bankQueues); + + assert(m_bankBusyCounter); + + assert(m_oldRequest); + + for (int i = 0; i < m_total_banks; i++) { + m_bankBusyCounter[i] = 0; + m_oldRequest[i] = 0; + } + + m_busBusyCounter_Basic = 0; + m_busBusyCounter_Write = 0; + m_busBusyCounter_ReadNewRank = 0; + m_busBusy_WhichRank = 0; + + m_roundRobin = 0; + m_refresh_count = 1; + m_need_refresh = 0; + m_refresh_bank = 0; + m_idleCount = 0; + m_ageCounter = 0; + + // Each tfaw shift register keeps a moving bit pattern + // which shows when recent activates have occurred. + // m_tfaw_count keeps track of how many 1 bits are set + // in each shift register. When m_tfaw_count is >= 4, + // new activates are not allowed. 
+ for (int i = 0; i < m_total_ranks; i++) { + m_tfaw_shift[i] = 0; + m_tfaw_count[i] = 0; + } +} + +RubyMemoryControl::~RubyMemoryControl() +{ + delete [] m_bankQueues; + delete [] m_bankBusyCounter; + delete [] m_oldRequest; + delete m_profiler_ptr; +} + +// enqueue new request from directory +void +RubyMemoryControl::enqueue(const MsgPtr& message, Cycles latency) +{ + Cycles arrival_time = curCycle() + latency; + const MemoryMsg* memMess = safe_cast(message.get()); + physical_address_t addr = memMess->getAddr().getAddress(); + MemoryRequestType type = memMess->getType(); + bool is_mem_read = (type == MemoryRequestType_MEMORY_READ); + MemoryNode *thisReq = new MemoryNode(arrival_time, message, addr, + is_mem_read, !is_mem_read); + enqueueMemRef(thisReq); +} + +// Alternate entry point used when we already have a MemoryNode +// structure built. +void +RubyMemoryControl::enqueueMemRef(MemoryNode *memRef) +{ + m_msg_counter++; + memRef->m_msg_counter = m_msg_counter; + physical_address_t addr = memRef->m_addr; + int bank = getBank(addr); + + DPRINTF(RubyMemory, + "New memory request%7d: %#08x %c arrived at %10d bank = %3x sched %c\n", + m_msg_counter, addr, memRef->m_is_mem_read ? 'R':'W', + memRef->m_time * g_system_ptr->clockPeriod(), + bank, m_event.scheduled() ? 'Y':'N'); + + m_profiler_ptr->profileMemReq(bank); + m_input_queue.push_back(memRef); + + if (!m_event.scheduled()) { + schedule(m_event, clockEdge()); + } +} + +// dequeue, peek, and isReady are used to transfer completed requests +// back to the directory +void +RubyMemoryControl::dequeue() +{ + assert(isReady()); + MemoryNode *req = m_response_queue.front(); + m_response_queue.pop_front(); + delete req; +} + +const Message* +RubyMemoryControl::peek() +{ + MemoryNode *node = peekNode(); + Message* msg_ptr = node->m_msgptr.get(); + assert(msg_ptr != NULL); + return msg_ptr; +} + +MemoryNode * +RubyMemoryControl::peekNode() +{ + assert(isReady()); + MemoryNode *req = m_response_queue.front(); + DPRINTF(RubyMemory, "Peek: memory request%7d: %#08x %c sched %c\n", + req->m_msg_counter, req->m_addr, req->m_is_mem_read ? 'R':'W', + m_event.scheduled() ? 'Y':'N'); + + return req; +} + +bool +RubyMemoryControl::isReady() +{ + return ((!m_response_queue.empty()) && + (m_response_queue.front()->m_time <= g_system_ptr->curCycle())); +} + +void +RubyMemoryControl::setConsumer(Consumer* consumer_ptr) +{ + m_consumer_ptr = consumer_ptr; +} + +void +RubyMemoryControl::print(ostream& out) const +{ +} + +// Queue up a completed request to send back to directory +void +RubyMemoryControl::enqueueToDirectory(MemoryNode *req, Cycles latency) +{ + Tick arrival_time = clockEdge(latency); + Cycles ruby_arrival_time = g_system_ptr->ticksToCycles(arrival_time); + req->m_time = ruby_arrival_time; + m_response_queue.push_back(req); + + DPRINTF(RubyMemory, "Enqueueing msg %#08x %c back to directory at %15d\n", + req->m_addr, req->m_is_mem_read ? 'R':'W', arrival_time); + + // schedule the wake up + m_consumer_ptr->scheduleEventAbsolute(arrival_time); +} + +// getBank returns an integer that is unique for each +// bank across this memory controller. 
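The getBank()/getRank() helpers defined next slice the bank, rank and DIMM indices out of fixed bit positions of the physical address and flatten them into a single bank number. A standalone sketch of that slicing, using the default field positions from RubyMemoryControl.py later in this patch (bank_bit_0 = 8, rank_bit_0 = 11, dimm_bit_0 = 12, with 8 banks per rank, 2 ranks per DIMM and 2 DIMMs per channel); the constant names are illustrative:

    // bank_mapping.cc -- illustrates the physical-address -> bank/rank/dimm
    // slicing performed by RubyMemoryControl::getBank(); field positions are
    // the defaults from RubyMemoryControl.py in this patch.
    #include <cstdint>
    #include <iostream>

    static const int kBankBit0 = 8,  kBanksPerRank    = 8;   // bits 8-10
    static const int kRankBit0 = 11, kRanksPerDimm    = 2;   // bit 11
    static const int kDimmBit0 = 12, kDimmsPerChannel = 2;   // bit 12

    // Flatten (dimm, rank, bank) into one bank index, as getBank() does.
    int getBank(uint64_t addr)
    {
        int dimm = (addr >> kDimmBit0) & (kDimmsPerChannel - 1);
        int rank = (addr >> kRankBit0) & (kRanksPerDimm - 1);
        int bank = (addr >> kBankBit0) & (kBanksPerRank - 1);
        return (dimm * kRanksPerDimm * kBanksPerRank)
             + (rank * kBanksPerRank)
             + bank;
    }

    int main()
    {
        // Lines that are 256 bytes apart land in different banks.
        for (uint64_t addr = 0; addr < 8 * 256; addr += 256)
            std::cout << std::hex << "0x" << addr << std::dec
                      << " -> bank " << getBank(addr) << "\n";
        return 0;
    }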
+const int +RubyMemoryControl::getBank(const physical_address_t addr) const +{ + int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1); + int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1); + int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1); + return (dimm * m_ranks_per_dimm * m_banks_per_rank) + + (rank * m_banks_per_rank) + + bank; +} + +const int +RubyMemoryControl::getRank(const physical_address_t addr) const +{ + int bank = getBank(addr); + int rank = (bank / m_banks_per_rank); + assert (rank < (m_ranks_per_dimm * m_dimms_per_channel)); + return rank; +} + +// getRank returns an integer that is unique for each rank +// and independent of individual bank. +const int +RubyMemoryControl::getRank(int bank) const +{ + int rank = (bank / m_banks_per_rank); + assert (rank < (m_ranks_per_dimm * m_dimms_per_channel)); + return rank; +} + +// Not used! +const int +RubyMemoryControl::getChannel(const physical_address_t addr) const +{ + assert(false); + return -1; +} + +// Not used! +const int +RubyMemoryControl::getRow(const physical_address_t addr) const +{ + assert(false); + return -1; +} + +// queueReady determines if the head item in a bank queue +// can be issued this cycle +bool +RubyMemoryControl::queueReady(int bank) +{ + if ((m_bankBusyCounter[bank] > 0) && !m_mem_fixed_delay) { + m_profiler_ptr->profileMemBankBusy(); + + DPRINTF(RubyMemory, "bank %x busy %d\n", bank, m_bankBusyCounter[bank]); + return false; + } + + if (m_mem_random_arbitrate >= 2) { + if ((random() % 100) < m_mem_random_arbitrate) { + m_profiler_ptr->profileMemRandBusy(); + return false; + } + } + + if (m_mem_fixed_delay) + return true; + + if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) { + m_profiler_ptr->profileMemNotOld(); + return false; + } + + if (m_busBusyCounter_Basic == m_basic_bus_busy_time) { + // Another bank must have issued this same cycle. For + // profiling, we count this as an arb wait rather than a bus + // wait. This is a little inaccurate since it MIGHT have also + // been blocked waiting for a read-write or a read-read + // instead, but it's pretty close. 
+ m_profiler_ptr->profileMemArbWait(1); + return false; + } + + if (m_busBusyCounter_Basic > 0) { + m_profiler_ptr->profileMemBusBusy(); + return false; + } + + int rank = getRank(bank); + if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) { + m_profiler_ptr->profileMemTfawBusy(); + return false; + } + + bool write = !m_bankQueues[bank].front()->m_is_mem_read; + if (write && (m_busBusyCounter_Write > 0)) { + m_profiler_ptr->profileMemReadWriteBusy(); + return false; + } + + if (!write && (rank != m_busBusy_WhichRank) + && (m_busBusyCounter_ReadNewRank > 0)) { + m_profiler_ptr->profileMemDataBusBusy(); + return false; + } + + return true; +} + +// issueRefresh checks to see if this bank has a refresh scheduled +// and, if so, does the refresh and returns true +bool +RubyMemoryControl::issueRefresh(int bank) +{ + if (!m_need_refresh || (m_refresh_bank != bank)) + return false; + if (m_bankBusyCounter[bank] > 0) + return false; + // Note that m_busBusyCounter will prevent multiple issues during + // the same cycle, as well as on different but close cycles: + if (m_busBusyCounter_Basic > 0) + return false; + int rank = getRank(bank); + if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) + return false; + + // Issue it: + DPRINTF(RubyMemory, "Refresh bank %3x\n", bank); + + m_profiler_ptr->profileMemRefresh(); + m_need_refresh--; + m_refresh_bank++; + if (m_refresh_bank >= m_total_banks) + m_refresh_bank = 0; + m_bankBusyCounter[bank] = m_bank_busy_time; + m_busBusyCounter_Basic = m_basic_bus_busy_time; + m_busBusyCounter_Write = m_basic_bus_busy_time; + m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time; + markTfaw(rank); + return true; +} + +// Mark the activate in the tFaw shift register +void +RubyMemoryControl::markTfaw(int rank) +{ + if (m_tFaw) { + m_tfaw_shift[rank] |= (1 << (m_tFaw-1)); + m_tfaw_count[rank]++; + } +} + +// Issue a memory request: Activate the bank, reserve the address and +// data buses, and queue the request for return to the requesting +// processor after a fixed latency. +void +RubyMemoryControl::issueRequest(int bank) +{ + int rank = getRank(bank); + MemoryNode *req = m_bankQueues[bank].front(); + m_bankQueues[bank].pop_front(); + + DPRINTF(RubyMemory, "Mem issue request%7d: %#08x %c " + "bank=%3x sched %c\n", req->m_msg_counter, req->m_addr, + req->m_is_mem_read? 'R':'W', + bank, m_event.scheduled() ? 'Y':'N'); + + if (req->m_msgptr) { // don't enqueue L3 writebacks + enqueueToDirectory(req, Cycles(m_mem_ctl_latency + m_mem_fixed_delay)); + } + m_oldRequest[bank] = 0; + markTfaw(rank); + m_bankBusyCounter[bank] = m_bank_busy_time; + m_busBusy_WhichRank = rank; + if (req->m_is_mem_read) { + m_profiler_ptr->profileMemRead(); + m_busBusyCounter_Basic = m_basic_bus_busy_time; + m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay; + m_busBusyCounter_ReadNewRank = + m_basic_bus_busy_time + m_rank_rank_delay; + } else { + m_profiler_ptr->profileMemWrite(); + m_busBusyCounter_Basic = m_basic_bus_busy_time; + m_busBusyCounter_Write = m_basic_bus_busy_time; + m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time; + } +} + +// executeCycle: This function is called once per memory clock cycle +// to simulate all the periodic hardware. 
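The three bus-busy counters that issueRequest() loads encode the bubbles described in the header comment: after a read, a later write has to wait an extra read_write_delay, and a read to a different rank has to wait an extra rank_rank_delay. The toy timeline below is not the gem5 code and uses made-up delay values (basic = 2, read-to-write = 2, rank-to-rank = 1) purely to show the effect.

    // bus_bubbles.cc -- toy timeline for the three bus-busy counters.
    #include <iostream>

    struct BusState
    {
        int basic = 0, write = 0, readNewRank = 0;
        int lastRank = 0;

        void tick()
        {
            if (basic) basic--;
            if (write) write--;
            if (readNewRank) readNewRank--;
        }

        bool canIssue(bool isWrite, int rank) const
        {
            if (basic > 0) return false;
            if (isWrite && write > 0) return false;
            if (!isWrite && rank != lastRank && readNewRank > 0) return false;
            return true;
        }

        void issue(bool isWrite, int rank)
        {
            lastRank = rank;
            basic = 2;
            write = isWrite ? 2 : 2 + 2;          // extra read->write bubble
            readNewRank = isWrite ? 2 : 2 + 1;    // extra rank-switch bubble
        }
    };

    int main()
    {
        BusState bus;
        for (int cycle = 0; cycle < 10; cycle++) {
            if (cycle == 0 && bus.canIssue(false, 0)) {        // read, rank 0
                bus.issue(false, 0);
                std::cout << "cycle 0: read to rank 0 issued\n";
            } else if (cycle > 0 && bus.canIssue(true, 0)) {   // first possible write
                bus.issue(true, 0);
                std::cout << "cycle " << cycle << ": write issued\n";
                break;
            }
            bus.tick();
        }
        return 0;
    }

Run as written, the write issues at cycle 4 instead of cycle 2; the two extra cycles are the read-to-write bubble.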
+void +RubyMemoryControl::executeCycle() +{ + // Keep track of time by counting down the busy counters: + for (int bank=0; bank < m_total_banks; bank++) { + if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--; + } + if (m_busBusyCounter_Write > 0) + m_busBusyCounter_Write--; + if (m_busBusyCounter_ReadNewRank > 0) + m_busBusyCounter_ReadNewRank--; + if (m_busBusyCounter_Basic > 0) + m_busBusyCounter_Basic--; + + // Count down the tFAW shift registers: + for (int rank=0; rank < m_total_ranks; rank++) { + if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--; + m_tfaw_shift[rank] >>= 1; + } + + // After time period expires, latch an indication that we need a refresh. + // Disable refresh if in mem_fixed_delay mode. + if (!m_mem_fixed_delay) m_refresh_count--; + if (m_refresh_count == 0) { + m_refresh_count = m_refresh_period_system; + + // Are we overrunning our ability to refresh? + assert(m_need_refresh < 10); + m_need_refresh++; + } + + // If this batch of requests is all done, make a new batch: + m_ageCounter++; + int anyOld = 0; + for (int bank=0; bank < m_total_banks; bank++) { + anyOld |= m_oldRequest[bank]; + } + if (!anyOld) { + for (int bank=0; bank < m_total_banks; bank++) { + if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1; + } + m_ageCounter = 0; + } + + // If randomness desired, re-randomize round-robin position each cycle + if (m_mem_random_arbitrate) { + m_roundRobin = random() % m_total_banks; + } + + // For each channel, scan round-robin, and pick an old, ready + // request and issue it. Treat a refresh request as if it were at + // the head of its bank queue. After we issue something, keep + // scanning the queues just to gather statistics about how many + // are waiting. If in mem_fixed_delay mode, we can issue more + // than one request per cycle. + int queueHeads = 0; + int banksIssued = 0; + for (int i = 0; i < m_total_banks; i++) { + m_roundRobin++; + if (m_roundRobin >= m_total_banks) m_roundRobin = 0; + issueRefresh(m_roundRobin); + int qs = m_bankQueues[m_roundRobin].size(); + if (qs > 1) { + m_profiler_ptr->profileMemBankQ(qs-1); + } + if (qs > 0) { + // we're not idle if anything is queued + m_idleCount = IDLECOUNT_MAX_VALUE; + queueHeads++; + if (queueReady(m_roundRobin)) { + issueRequest(m_roundRobin); + banksIssued++; + if (m_mem_fixed_delay) { + m_profiler_ptr->profileMemWaitCycles(m_mem_fixed_delay); + } + } + } + } + + // memWaitCycles is a redundant catch-all for the specific + // counters in queueReady + m_profiler_ptr->profileMemWaitCycles(queueHeads - banksIssued); + + // Check input queue and move anything to bank queues if not full. + // Since this is done here at the end of the cycle, there will + // always be at least one cycle of latency in the bank queue. We + // deliberately move at most one request per cycle (to simulate + // typical hardware). Note that if one bank queue fills up, other + // requests can get stuck behind it here. 
+ if (!m_input_queue.empty()) { + // we're not idle if anything is pending + m_idleCount = IDLECOUNT_MAX_VALUE; + MemoryNode *req = m_input_queue.front(); + int bank = getBank(req->m_addr); + if (m_bankQueues[bank].size() < m_bank_queue_size) { + m_input_queue.pop_front(); + m_bankQueues[bank].push_back(req); + } + m_profiler_ptr->profileMemInputQ(m_input_queue.size()); + } +} + +unsigned int +RubyMemoryControl::drain(DrainManager *dm) +{ + DPRINTF(RubyMemory, "MemoryController drain\n"); + if(m_event.scheduled()) { + deschedule(m_event); + } + return 0; +} + +// wakeup: This function is called once per memory controller clock cycle. +void +RubyMemoryControl::wakeup() +{ + DPRINTF(RubyMemory, "MemoryController wakeup\n"); + // execute everything + executeCycle(); + + m_idleCount--; + if (m_idleCount > 0) { + assert(!m_event.scheduled()); + schedule(m_event, clockEdge(Cycles(1))); + } +} + +/** + * This function reads the different buffers that exist in the Ruby Memory + * Controller, and figures out if any of the buffers hold a message that + * contains the data for the address provided in the packet. True is returned + * if any of the messages was read, otherwise false is returned. + * + * I think we should move these buffers to being message buffers, instead of + * being lists. + */ +bool +RubyMemoryControl::functionalReadBuffers(Packet *pkt) +{ + for (std::list::iterator it = m_input_queue.begin(); + it != m_input_queue.end(); ++it) { + Message* msg_ptr = (*it)->m_msgptr.get(); + if (msg_ptr->functionalRead(pkt)) { + return true; + } + } + + for (std::list::iterator it = m_response_queue.begin(); + it != m_response_queue.end(); ++it) { + Message* msg_ptr = (*it)->m_msgptr.get(); + if (msg_ptr->functionalRead(pkt)) { + return true; + } + } + + for (uint32_t bank = 0; bank < m_total_banks; ++bank) { + for (std::list::iterator it = m_bankQueues[bank].begin(); + it != m_bankQueues[bank].end(); ++it) { + Message* msg_ptr = (*it)->m_msgptr.get(); + if (msg_ptr->functionalRead(pkt)) { + return true; + } + } + } + + return false; +} + +/** + * This function reads the different buffers that exist in the Ruby Memory + * Controller, and figures out if any of the buffers hold a message that + * needs to functionally written with the data in the packet. + * + * The number of messages written is returned at the end. This is required + * for debugging purposes. 
+ */ +uint32_t +RubyMemoryControl::functionalWriteBuffers(Packet *pkt) +{ + uint32_t num_functional_writes = 0; + + for (std::list::iterator it = m_input_queue.begin(); + it != m_input_queue.end(); ++it) { + Message* msg_ptr = (*it)->m_msgptr.get(); + if (msg_ptr->functionalWrite(pkt)) { + num_functional_writes++; + } + } + + for (std::list::iterator it = m_response_queue.begin(); + it != m_response_queue.end(); ++it) { + Message* msg_ptr = (*it)->m_msgptr.get(); + if (msg_ptr->functionalWrite(pkt)) { + num_functional_writes++; + } + } + + for (uint32_t bank = 0; bank < m_total_banks; ++bank) { + for (std::list::iterator it = m_bankQueues[bank].begin(); + it != m_bankQueues[bank].end(); ++it) { + Message* msg_ptr = (*it)->m_msgptr.get(); + if (msg_ptr->functionalWrite(pkt)) { + num_functional_writes++; + } + } + } + + return num_functional_writes; +} + +void +RubyMemoryControl::regStats() +{ + m_profiler_ptr->regStats(); +} + +RubyMemoryControl * +RubyMemoryControlParams::create() +{ + return new RubyMemoryControl(this); +} diff --git a/src/mem/ruby/structures/RubyMemoryControl.hh b/src/mem/ruby/structures/RubyMemoryControl.hh new file mode 100644 index 000000000..f7fb17975 --- /dev/null +++ b/src/mem/ruby/structures/RubyMemoryControl.hh @@ -0,0 +1,172 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * Copyright (c) 2012 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
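The wakeup()/idle-count interplay in the file above implements the watchdog described near its top: the controller reschedules its own wakeup every cycle while work is pending and stops rescheduling after IDLECOUNT_MAX_VALUE idle cycles so the event queue can empty. The sketch below fakes the event machinery with an ordinary loop and a tiny idle limit; it only shows the shape of the pattern, not the gem5 scheduling API.

    // idle_watchdog.cc -- sketch of the self-scheduling pattern: keep waking
    // up while work is pending, let the "clock" stop after a few idle cycles.
    #include <iostream>
    #include <queue>

    static const int kIdleMax = 5;   // stand-in for IDLECOUNT_MAX_VALUE (1000 above)

    int main()
    {
        std::queue<int> work;            // pretend these are memory requests
        work.push(1); work.push(2);

        int idleCount = kIdleMax;
        bool scheduled = true;           // a wakeup is initially scheduled
        for (int cycle = 0; scheduled; cycle++) {
            // "executeCycle": do one unit of work, reset the watchdog while busy
            if (!work.empty()) {
                std::cout << "cycle " << cycle << ": service request " << work.front() << "\n";
                work.pop();
                idleCount = kIdleMax;
            }
            // "wakeup": reschedule ourselves only while the watchdog is running
            idleCount--;
            scheduled = (idleCount > 0);
        }
        std::cout << "clock stopped; event queue is empty\n";
        return 0;
    }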
+ */ + +#ifndef __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__ +#define __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__ + +#include +#include +#include + +#include "mem/protocol/MemoryMsg.hh" +#include "mem/ruby/common/Address.hh" +#include "mem/ruby/common/Consumer.hh" +#include "mem/ruby/common/Global.hh" +#include "mem/ruby/profiler/MemCntrlProfiler.hh" +#include "mem/ruby/slicc_interface/Message.hh" +#include "mem/ruby/structures/MemoryControl.hh" +#include "mem/ruby/structures/MemoryNode.hh" +#include "mem/ruby/system/System.hh" +#include "params/RubyMemoryControl.hh" +#include "sim/sim_object.hh" + +// This constant is part of the definition of tFAW; see +// the comments in header to RubyMemoryControl.cc +#define ACTIVATE_PER_TFAW 4 + +////////////////////////////////////////////////////////////////////////////// + +class RubyMemoryControl : public MemoryControl +{ + public: + typedef RubyMemoryControlParams Params; + RubyMemoryControl(const Params *p); + void init(); + void reset(); + + ~RubyMemoryControl(); + + unsigned int drain(DrainManager *dm); + + void wakeup(); + + void setConsumer(Consumer* consumer_ptr); + Consumer* getConsumer() { return m_consumer_ptr; }; + void setDescription(const std::string& name) { m_description = name; }; + std::string getDescription() { return m_description; }; + + // Called from the directory: + void enqueue(const MsgPtr& message, Cycles latency); + void enqueueMemRef(MemoryNode *memRef); + void dequeue(); + const Message* peek(); + MemoryNode *peekNode(); + bool isReady(); + bool areNSlotsAvailable(int n) { return true; }; // infinite queue length + + void print(std::ostream& out) const; + void regStats(); + + const int getBank(const physical_address_t addr) const; + const int getRank(const physical_address_t addr) const; + + // not used in Ruby memory controller + const int getChannel(const physical_address_t addr) const; + const int getRow(const physical_address_t addr) const; + + //added by SS + int getBanksPerRank() { return m_banks_per_rank; }; + int getRanksPerDimm() { return m_ranks_per_dimm; }; + int getDimmsPerChannel() { return m_dimms_per_channel; } + + bool functionalReadBuffers(Packet *pkt); + uint32_t functionalWriteBuffers(Packet *pkt); + + private: + void enqueueToDirectory(MemoryNode *req, Cycles latency); + const int getRank(int bank) const; + bool queueReady(int bank); + void issueRequest(int bank); + bool issueRefresh(int bank); + void markTfaw(int rank); + void executeCycle(); + + // Private copy constructor and assignment operator + RubyMemoryControl (const RubyMemoryControl& obj); + RubyMemoryControl& operator=(const RubyMemoryControl& obj); + + // data members + Consumer* m_consumer_ptr; // Consumer to signal a wakeup() + std::string m_description; + int m_msg_counter; + + int m_banks_per_rank; + int m_ranks_per_dimm; + int m_dimms_per_channel; + int m_bank_bit_0; + int m_rank_bit_0; + int m_dimm_bit_0; + unsigned int m_bank_queue_size; + int m_bank_busy_time; + int m_rank_rank_delay; + int m_read_write_delay; + int m_basic_bus_busy_time; + Cycles m_mem_ctl_latency; + int m_refresh_period; + int m_mem_random_arbitrate; + int m_tFaw; + Cycles m_mem_fixed_delay; + + int m_total_banks; + int m_total_ranks; + int m_refresh_period_system; + + // queues where memory requests live + std::list m_response_queue; + std::list m_input_queue; + std::list* m_bankQueues; + + // Each entry indicates number of address-bus cycles until bank + // is reschedulable: + int* m_bankBusyCounter; + int* m_oldRequest; + + uint64* m_tfaw_shift; + int* m_tfaw_count; + + 
// Each of these indicates number of address-bus cycles until + // we can issue a new request of the corresponding type: + int m_busBusyCounter_Write; + int m_busBusyCounter_ReadNewRank; + int m_busBusyCounter_Basic; + + int m_busBusy_WhichRank; // which rank last granted + int m_roundRobin; // which bank queue was last granted + int m_refresh_count; // cycles until next refresh + int m_need_refresh; // set whenever m_refresh_count goes to zero + int m_refresh_bank; // which bank to refresh next + int m_ageCounter; // age of old requests; to detect starvation + int m_idleCount; // watchdog timer for shutting down + + MemCntrlProfiler* m_profiler_ptr; +}; + +std::ostream& operator<<(std::ostream& out, const RubyMemoryControl& obj); + +#endif // __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__ diff --git a/src/mem/ruby/structures/RubyMemoryControl.py b/src/mem/ruby/structures/RubyMemoryControl.py new file mode 100644 index 000000000..f0828fb19 --- /dev/null +++ b/src/mem/ruby/structures/RubyMemoryControl.py @@ -0,0 +1,55 @@ +# Copyright (c) 2009 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# Authors: Steve Reinhardt +# Brad Beckmann + +from m5.params import * +from m5.SimObject import SimObject +from MemoryControl import MemoryControl + +class RubyMemoryControl(MemoryControl): + type = 'RubyMemoryControl' + cxx_class = 'RubyMemoryControl' + cxx_header = "mem/ruby/structures/RubyMemoryControl.hh" + version = Param.Int(""); + + banks_per_rank = Param.Int(8, ""); + ranks_per_dimm = Param.Int(2, ""); + dimms_per_channel = Param.Int(2, ""); + bank_bit_0 = Param.Int(8, ""); + rank_bit_0 = Param.Int(11, ""); + dimm_bit_0 = Param.Int(12, ""); + bank_queue_size = Param.Int(12, ""); + bank_busy_time = Param.Int(11, ""); + rank_rank_delay = Param.Int(1, ""); + read_write_delay = Param.Int(2, ""); + basic_bus_busy_time = Param.Int(2, ""); + mem_ctl_latency = Param.Cycles(12, ""); + refresh_period = Param.Cycles(1560, ""); + tFaw = Param.Int(0, ""); + mem_random_arbitrate = Param.Int(0, ""); + mem_fixed_delay = Param.Cycles(0, ""); diff --git a/src/mem/ruby/structures/SConscript b/src/mem/ruby/structures/SConscript index 170f61e88..a5abbf449 100644 --- a/src/mem/ruby/structures/SConscript +++ b/src/mem/ruby/structures/SConscript @@ -33,5 +33,21 @@ Import('*') if env['PROTOCOL'] == 'None': Return() +SimObject('Cache.py') +SimObject('DirectoryMemory.py') +SimObject('MemoryControl.py') +SimObject('RubyMemoryControl.py') SimObject('RubyPrefetcher.py') +SimObject('WireBuffer.py') + +Source('DirectoryMemory.cc') +Source('SparseMemory.cc') +Source('CacheMemory.cc') +Source('MemoryControl.cc') +Source('WireBuffer.cc') +Source('RubyMemoryControl.cc') +Source('MemoryNode.cc') +Source('PersistentTable.cc') Source('Prefetcher.cc') +Source('TimerTable.cc') +Source('BankedArray.cc') diff --git a/src/mem/ruby/structures/SparseMemory.cc b/src/mem/ruby/structures/SparseMemory.cc new file mode 100644 index 000000000..a63790502 --- /dev/null +++ b/src/mem/ruby/structures/SparseMemory.cc @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2009 Advanced Micro Devices, Inc. + * Copyright (c) 2012 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "debug/RubyCache.hh" +#include "mem/ruby/structures/SparseMemory.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; + +SparseMemory::SparseMemory(int number_of_levels) +{ + int even_level_bits; + int extra; + m_total_number_of_bits = RubySystem::getMemorySizeBits() + - RubySystem::getBlockSizeBits();; + + m_number_of_levels = number_of_levels; + + // + // Create the array that describes the bits per level + // + m_number_of_bits_per_level = new int[m_number_of_levels]; + even_level_bits = m_total_number_of_bits / m_number_of_levels; + extra = m_total_number_of_bits % m_number_of_levels; + for (int level = 0; level < m_number_of_levels; level++) { + if (level < extra) + m_number_of_bits_per_level[level] = even_level_bits + 1; + else + m_number_of_bits_per_level[level] = even_level_bits; + } + m_map_head = new SparseMapType; +} + +SparseMemory::~SparseMemory() +{ + recursivelyRemoveTables(m_map_head, 0); + delete m_map_head; + delete [] m_number_of_bits_per_level; +} + +// Recursively search table hierarchy for the lowest level table. +// Delete the lowest table first, the tables above +void +SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel) +{ + SparseMapType::iterator iter; + + for (iter = curTable->begin(); iter != curTable->end(); iter++) { + SparseMemEntry entry = (*iter).second; + + if (curLevel != (m_number_of_levels - 1)) { + // If the not at the last level, analyze those lower level + // tables first, then delete those next tables + SparseMapType* nextTable = (SparseMapType*)(entry); + recursivelyRemoveTables(nextTable, (curLevel + 1)); + delete nextTable; + } else { + // If at the last level, delete the directory entry + delete (AbstractEntry*)(entry); + } + entry = NULL; + } + + // Once all entries have been deleted, erase the entries + curTable->erase(curTable->begin(), curTable->end()); +} + +// tests to see if an address is present in the memory +bool +SparseMemory::exist(const Address& address) const +{ + SparseMapType* curTable = m_map_head; + Address curAddress; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + int lowBit; + assert(address == line_address(address)); + DPRINTF(RubyCache, "address: %s\n", address); + + for (int level = 0; level < m_number_of_levels; level++) { + // Create the appropriate sub address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. 
+ lowBit = highBit - m_number_of_bits_per_level[level]; + curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); + + DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, " + "curAddress: %s\n", + level, lowBit, highBit - 1, curAddress); + + // Adjust the highBit value for the next level + highBit -= m_number_of_bits_per_level[level]; + + // If the address is found, move on to the next level. + // Otherwise, return not found + if (curTable->count(curAddress) != 0) { + curTable = (SparseMapType*)((*curTable)[curAddress]); + } else { + DPRINTF(RubyCache, "Not found\n"); + return false; + } + } + + DPRINTF(RubyCache, "Entry found\n"); + return true; +} + +// add an address to memory +void +SparseMemory::add(const Address& address, AbstractEntry* entry) +{ + assert(address == line_address(address)); + assert(!exist(address)); + + m_total_adds++; + + Address curAddress; + SparseMapType* curTable = m_map_head; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + int lowBit; + void* newEntry = NULL; + + for (int level = 0; level < m_number_of_levels; level++) { + // create the appropriate address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. + lowBit = highBit - m_number_of_bits_per_level[level]; + curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); + + // Adjust the highBit value for the next level + highBit -= m_number_of_bits_per_level[level]; + + // if the address exists in the cur table, move on. Otherwise + // create a new table. + if (curTable->count(curAddress) != 0) { + curTable = (SparseMapType*)((*curTable)[curAddress]); + } else { + m_adds_per_level[level]++; + + // if the last level, add a directory entry. Otherwise add a map. + if (level == (m_number_of_levels - 1)) { + entry->getDataBlk().clear(); + newEntry = (void*)entry; + } else { + SparseMapType* tempMap = new SparseMapType; + newEntry = (void*)(tempMap); + } + + // Create the pointer container SparseMemEntry and add it + // to the table. + (*curTable)[curAddress] = newEntry; + + // Move to the next level of the heirarchy + curTable = (SparseMapType*)newEntry; + } + } + + assert(exist(address)); + return; +} + +// recursively search table hierarchy for the lowest level table. +// remove the lowest entry and any empty tables above it. +int +SparseMemory::recursivelyRemoveLevels(const Address& address, + CurNextInfo& curInfo) +{ + Address curAddress; + CurNextInfo nextInfo; + SparseMemEntry entry; + + // create the appropriate address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. 
+ curAddress.setAddress(address.bitSelect(curInfo.lowBit, + curInfo.highBit - 1)); + + DPRINTF(RubyCache, "address: %s, curInfo.level: %d, curInfo.lowBit: %d, " + "curInfo.highBit - 1: %d, curAddress: %s\n", + address, curInfo.level, curInfo.lowBit, + curInfo.highBit - 1, curAddress); + + assert(curInfo.curTable->count(curAddress) != 0); + + entry = (*(curInfo.curTable))[curAddress]; + + if (curInfo.level < (m_number_of_levels - 1)) { + // set up next level's info + nextInfo.curTable = (SparseMapType*)(entry); + nextInfo.level = curInfo.level + 1; + + nextInfo.highBit = curInfo.highBit - + m_number_of_bits_per_level[curInfo.level]; + + nextInfo.lowBit = curInfo.lowBit - + m_number_of_bits_per_level[curInfo.level + 1]; + + // recursively search the table hierarchy + int tableSize = recursivelyRemoveLevels(address, nextInfo); + + // If this table below is now empty, we must delete it and + // erase it from our table. + if (tableSize == 0) { + m_removes_per_level[curInfo.level]++; + delete nextInfo.curTable; + entry = NULL; + curInfo.curTable->erase(curAddress); + } + } else { + // if this is the last level, we have reached the Directory + // Entry and thus we should delete it including the + // SparseMemEntry container struct. + delete (AbstractEntry*)(entry); + entry = NULL; + curInfo.curTable->erase(curAddress); + m_removes_per_level[curInfo.level]++; + } + return curInfo.curTable->size(); +} + +// remove an entry from the table +void +SparseMemory::remove(const Address& address) +{ + assert(address == line_address(address)); + assert(exist(address)); + + m_total_removes++; + + CurNextInfo nextInfo; + + // Initialize table pointer and level value + nextInfo.curTable = m_map_head; + nextInfo.level = 0; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + nextInfo.highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + nextInfo.lowBit = nextInfo.highBit - m_number_of_bits_per_level[0];; + + // recursively search the table hierarchy for empty tables + // starting from the level 0. Note we do not check the return + // value because the head table is never deleted; + recursivelyRemoveLevels(address, nextInfo); + + assert(!exist(address)); + return; +} + +// looks an address up in memory +AbstractEntry* +SparseMemory::lookup(const Address& address) +{ + assert(address == line_address(address)); + + Address curAddress; + SparseMapType* curTable = m_map_head; + AbstractEntry* entry = NULL; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + int lowBit; + + for (int level = 0; level < m_number_of_levels; level++) { + // create the appropriate address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. + lowBit = highBit - m_number_of_bits_per_level[level]; + curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); + + DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, " + "curAddress: %s\n", + level, lowBit, highBit - 1, curAddress); + + // Adjust the highBit value for the next level + highBit -= m_number_of_bits_per_level[level]; + + // If the address is found, move on to the next level. 
+ // Otherwise, return not found + if (curTable->count(curAddress) != 0) { + curTable = (SparseMapType*)((*curTable)[curAddress]); + } else { + DPRINTF(RubyCache, "Not found\n"); + return NULL; + } + } + + // The last entry actually points to the Directory entry not a table + entry = (AbstractEntry*)curTable; + + return entry; +} + +void +SparseMemory::recordBlocks(int cntrl_id, CacheRecorder* tr) const +{ + queue unexplored_nodes[2]; + queue address_of_nodes[2]; + + unexplored_nodes[0].push(m_map_head); + address_of_nodes[0].push(0); + + int parity_of_level = 0; + physical_address_t address, temp_address; + Address curAddress; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + int lowBit; + + for (int cur_level = 0; cur_level < m_number_of_levels; cur_level++) { + + // create the appropriate address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. + lowBit = highBit - m_number_of_bits_per_level[cur_level]; + + while (!unexplored_nodes[parity_of_level].empty()) { + + SparseMapType* node = unexplored_nodes[parity_of_level].front(); + unexplored_nodes[parity_of_level].pop(); + + address = address_of_nodes[parity_of_level].front(); + address_of_nodes[parity_of_level].pop(); + + SparseMapType::iterator iter; + + for (iter = node->begin(); iter != node->end(); iter++) { + SparseMemEntry entry = (*iter).second; + curAddress = (*iter).first; + + if (cur_level != (m_number_of_levels - 1)) { + // If not at the last level, put this node in the queue + unexplored_nodes[1 - parity_of_level].push( + (SparseMapType*)(entry)); + address_of_nodes[1 - parity_of_level].push(address | + (curAddress.getAddress() << lowBit)); + } else { + // If at the last level, add a trace record + temp_address = address | (curAddress.getAddress() + << lowBit); + DataBlock block = ((AbstractEntry*)entry)->getDataBlk(); + tr->addRecord(cntrl_id, temp_address, 0, RubyRequestType_ST, 0, + block); + } + } + } + + // Adjust the highBit value for the next level + highBit -= m_number_of_bits_per_level[cur_level]; + parity_of_level = 1 - parity_of_level; + } +} + +void +SparseMemory::regStats(const string &name) +{ + m_total_adds.name(name + ".total_adds"); + + m_adds_per_level + .init(m_number_of_levels) + .name(name + ".adds_per_level") + .flags(Stats::pdf | Stats::total) + ; + + m_total_removes.name(name + ".total_removes"); + m_removes_per_level + .init(m_number_of_levels) + .name(name + ".removes_per_level") + .flags(Stats::pdf | Stats::total) + ; +} diff --git a/src/mem/ruby/structures/SparseMemory.hh b/src/mem/ruby/structures/SparseMemory.hh new file mode 100644 index 000000000..9d3c6a844 --- /dev/null +++ b/src/mem/ruby/structures/SparseMemory.hh @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2009 Advanced Micro Devices, Inc. + * Copyright (c) 2012 Mark D. Hill and David A. Wood + * All rights reserved. 
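SparseMemory spreads the line-address bits evenly across its table levels (any remainder goes to the earlier, higher-order levels) and uses each per-level slice as the key into the next table. The standalone sketch below reproduces just that index arithmetic; the sizes are made up and no tables are built.

    // sparse_levels.cc -- how SparseMemory splits line-address bits across
    // table levels and which sub-index each level uses.  Illustrative sizes.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        const int totalBits = 20;        // e.g. 26 address bits minus a 6-bit block offset
        const int blockBits = 6;
        const int numLevels = 3;

        // Same split as the SparseMemory constructor: extra bits go to the
        // earlier (higher-order) levels.
        std::vector<int> bitsPerLevel(numLevels);
        int even = totalBits / numLevels, extra = totalBits % numLevels;
        for (int l = 0; l < numLevels; l++)
            bitsPerLevel[l] = (l < extra) ? even + 1 : even;

        uint64_t addr = 0x123456C0;      // some line-aligned physical address
        int highBit = totalBits + blockBits;
        for (int l = 0; l < numLevels; l++) {
            int lowBit = highBit - bitsPerLevel[l];
            uint64_t index = (addr >> lowBit) & ((uint64_t(1) << bitsPerLevel[l]) - 1);
            std::cout << "level " << l << ": bits [" << (highBit - 1) << ":" << lowBit
                      << "] -> index 0x" << std::hex << index << std::dec << "\n";
            highBit = lowBit;
        }
        return 0;
    }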
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__ +#define __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__ + +#include +#include + +#include "base/hashmap.hh" +#include "base/statistics.hh" +#include "mem/ruby/common/Address.hh" +#include "mem/ruby/slicc_interface/AbstractEntry.hh" +#include "mem/ruby/system/CacheRecorder.hh" + +typedef void* SparseMemEntry; +typedef m5::hash_map SparseMapType; + +struct CurNextInfo +{ + SparseMapType* curTable; + int level; + int highBit; + int lowBit; +}; + +class SparseMemory +{ + public: + SparseMemory(int number_of_levels); + ~SparseMemory(); + + bool exist(const Address& address) const; + void add(const Address& address, AbstractEntry*); + void remove(const Address& address); + + /*! + * Function for recording the contents of memory. This function walks + * through all the levels of the sparse memory in a breadth first + * fashion. This might need more memory than a depth first approach. + * But breadth first seems easier to me than a depth first approach. 
+ */ + void recordBlocks(int cntrl_id, CacheRecorder *) const; + + AbstractEntry* lookup(const Address& address); + void regStats(const std::string &name); + + private: + // Private copy constructor and assignment operator + SparseMemory(const SparseMemory& obj); + SparseMemory& operator=(const SparseMemory& obj); + + // Used by destructor to recursively remove all tables + void recursivelyRemoveTables(SparseMapType* currentTable, int level); + + // recursive search for address and remove associated entries + int recursivelyRemoveLevels(const Address& address, CurNextInfo& curInfo); + + // Data Members (m_prefix) + SparseMapType* m_map_head; + + int m_total_number_of_bits; + int m_number_of_levels; + int* m_number_of_bits_per_level; + + Stats::Scalar m_total_adds; + Stats::Vector m_adds_per_level; + Stats::Scalar m_total_removes; + Stats::Vector m_removes_per_level; +}; + +#endif // __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__ diff --git a/src/mem/ruby/structures/TBETable.hh b/src/mem/ruby/structures/TBETable.hh new file mode 100644 index 000000000..018da6cbb --- /dev/null +++ b/src/mem/ruby/structures/TBETable.hh @@ -0,0 +1,124 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
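The TBETable header below is a bounded map from line address to a transaction buffer entry, with isPresent/allocate/deallocate/lookup plus the areNSlotsAvailable() check the SLICC controllers use. A minimal usage sketch, with std::unordered_map standing in for m5::hash_map and a made-up entry type (both are assumptions, purely for illustration):

    // tbe_sketch.cc -- usage sketch for a TBE-table-like bounded map; uses
    // std::unordered_map and uint64_t line addresses instead of the gem5 types.
    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    struct TBE { int pendingAcks = 0; };     // made-up entry type

    class SimpleTBETable
    {
      public:
        explicit SimpleTBETable(size_t n) : m_max(n) {}

        bool isPresent(uint64_t addr) const { return m_map.count(addr) != 0; }
        bool areNSlotsAvailable(size_t n) const { return m_max - m_map.size() >= n; }

        void allocate(uint64_t addr)
        {
            assert(!isPresent(addr) && areNSlotsAvailable(1));
            m_map[addr] = TBE();
        }
        void deallocate(uint64_t addr) { m_map.erase(addr); }
        TBE *lookup(uint64_t addr)
        {
            auto it = m_map.find(addr);
            return it == m_map.end() ? nullptr : &it->second;
        }

      private:
        std::unordered_map<uint64_t, TBE> m_map;
        size_t m_max;
    };

    int main()
    {
        SimpleTBETable tbes(2);               // at most two outstanding transactions
        tbes.allocate(0x1000);
        tbes.lookup(0x1000)->pendingAcks = 3;
        std::cout << "room for one more? " << tbes.areNSlotsAvailable(1) << "\n";
        tbes.deallocate(0x1000);
        return 0;
    }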
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_TBETABLE_HH__
+#define __MEM_RUBY_SYSTEM_TBETABLE_HH__
+
+#include <iostream>
+
+#include "base/hashmap.hh"
+#include "mem/ruby/common/Address.hh"
+
+template<class ENTRY>
+class TBETable
+{
+  public:
+    TBETable(int number_of_TBEs)
+        : m_number_of_TBEs(number_of_TBEs)
+    {
+    }
+
+    bool isPresent(const Address& address) const;
+    void allocate(const Address& address);
+    void deallocate(const Address& address);
+    bool
+    areNSlotsAvailable(int n) const
+    {
+        return (m_number_of_TBEs - m_map.size()) >= n;
+    }
+
+    ENTRY* lookup(const Address& address);
+
+    // Print cache contents
+    void print(std::ostream& out) const;
+
+  private:
+    // Private copy constructor and assignment operator
+    TBETable(const TBETable& obj);
+    TBETable& operator=(const TBETable& obj);
+
+    // Data Members (m_prefix)
+    m5::hash_map<Address, ENTRY> m_map;
+
+  private:
+    int m_number_of_TBEs;
+};
+
+template<class ENTRY>
+inline std::ostream&
+operator<<(std::ostream& out, const TBETable<ENTRY>& obj)
+{
+    obj.print(out);
+    out << std::flush;
+    return out;
+}
+
+template<class ENTRY>
+inline bool
+TBETable<ENTRY>::isPresent(const Address& address) const
+{
+    assert(address == line_address(address));
+    assert(m_map.size() <= m_number_of_TBEs);
+    return !!m_map.count(address);
+}
+
+template<class ENTRY>
+inline void
+TBETable<ENTRY>::allocate(const Address& address)
+{
+    assert(!isPresent(address));
+    assert(m_map.size() < m_number_of_TBEs);
+    m_map[address] = ENTRY();
+}
+
+template<class ENTRY>
+inline void
+TBETable<ENTRY>::deallocate(const Address& address)
+{
+    assert(isPresent(address));
+    assert(m_map.size() > 0);
+    m_map.erase(address);
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline ENTRY*
+TBETable<ENTRY>::lookup(const Address& address)
+{
+    if(m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
+    return NULL;
+}
+
+
+template<class ENTRY>
+inline void
+TBETable<ENTRY>::print(std::ostream& out) const
+{
+}
+
+#endif // __MEM_RUBY_SYSTEM_TBETABLE_HH__
diff --git a/src/mem/ruby/structures/TimerTable.cc b/src/mem/ruby/structures/TimerTable.cc
new file mode 100644
index 000000000..84c096b05
--- /dev/null
+++ b/src/mem/ruby/structures/TimerTable.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "mem/ruby/common/Global.hh" +#include "mem/ruby/structures/TimerTable.hh" +#include "mem/ruby/system/System.hh" + +TimerTable::TimerTable() + : m_next_time(0) +{ + m_consumer_ptr = NULL; + m_clockobj_ptr = NULL; + + m_next_valid = false; + m_next_address = Address(0); +} + +bool +TimerTable::isReady() const +{ + if (m_map.empty()) + return false; + + if (!m_next_valid) { + updateNext(); + } + assert(m_next_valid); + return (m_clockobj_ptr->curCycle() >= m_next_time); +} + +const Address& +TimerTable::readyAddress() const +{ + assert(isReady()); + + if (!m_next_valid) { + updateNext(); + } + assert(m_next_valid); + return m_next_address; +} + +void +TimerTable::set(const Address& address, Cycles relative_latency) +{ + assert(address == line_address(address)); + assert(relative_latency > 0); + assert(!m_map.count(address)); + + Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency; + m_map[address] = ready_time; + assert(m_consumer_ptr != NULL); + m_consumer_ptr-> + scheduleEventAbsolute(m_clockobj_ptr->clockPeriod() * ready_time); + m_next_valid = false; + + // Don't always recalculate the next ready address + if (ready_time <= m_next_time) { + m_next_valid = false; + } +} + +void +TimerTable::unset(const Address& address) +{ + assert(address == line_address(address)); + assert(m_map.count(address)); + m_map.erase(address); + + // Don't always recalculate the next ready address + if (address == m_next_address) { + m_next_valid = false; + } +} + +void +TimerTable::print(std::ostream& out) const +{ +} + +void +TimerTable::updateNext() const +{ + if (m_map.empty()) { + assert(!m_next_valid); + return; + } + + AddressMap::const_iterator i = m_map.begin(); + AddressMap::const_iterator end = m_map.end(); + + m_next_address = i->first; + m_next_time = i->second; + ++i; + + for (; i != end; ++i) { + if (i->second < m_next_time) { + m_next_address = i->first; + m_next_time = i->second; + } + } + + m_next_valid = true; +} diff --git a/src/mem/ruby/structures/TimerTable.hh b/src/mem/ruby/structures/TimerTable.hh new file mode 100644 index 000000000..b271d3e37 --- /dev/null +++ b/src/mem/ruby/structures/TimerTable.hh @@ -0,0 +1,107 @@ +/* + * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
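TimerTable, added just above, keeps one pending timeout per line address and reports the earliest one once the current cycle reaches it. The sketch below captures that idea with plain integers for addresses and cycles; the linear minimum scan mirrors updateNext(), though the caching of the next-ready entry is omitted for brevity.

    // timer_sketch.cc -- standalone sketch of the TimerTable idea: a map from
    // line address to expiry cycle, with the ready check scanning for the
    // minimum the way TimerTable::updateNext() does.
    #include <cstdint>
    #include <iostream>
    #include <map>

    class SimpleTimerTable
    {
      public:
        void set(uint64_t addr, uint64_t now, uint64_t delay) { m_map[addr] = now + delay; }
        void unset(uint64_t addr) { m_map.erase(addr); }

        bool isReady(uint64_t now) const
        {
            return !m_map.empty() && earliest()->second <= now;
        }
        uint64_t readyAddress() const { return earliest()->first; }

      private:
        // Linear scan for the minimum expiry, like TimerTable::updateNext().
        std::map<uint64_t, uint64_t>::const_iterator earliest() const
        {
            auto best = m_map.begin();
            for (auto it = m_map.begin(); it != m_map.end(); ++it)
                if (it->second < best->second)
                    best = it;
            return best;
        }

        std::map<uint64_t, uint64_t> m_map;   // address -> expiry cycle
    };

    int main()
    {
        SimpleTimerTable timers;
        timers.set(0x40, /*now=*/0, /*delay=*/10);
        timers.set(0x80, /*now=*/0, /*delay=*/5);
        for (uint64_t cycle = 0; cycle <= 10; cycle++)
            if (timers.isReady(cycle)) {
                std::cout << "cycle " << cycle << ": address 0x" << std::hex
                          << timers.readyAddress() << std::dec << " timed out\n";
                timers.unset(timers.readyAddress());
            }
        return 0;
    }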
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MEM_RUBY_SYSTEM_TIMERTABLE_HH__ +#define __MEM_RUBY_SYSTEM_TIMERTABLE_HH__ + +#include +#include +#include +#include + +#include "mem/ruby/common/Address.hh" +#include "mem/ruby/common/Consumer.hh" + +class TimerTable +{ + public: + TimerTable(); + + void + setConsumer(Consumer* consumer_ptr) + { + assert(m_consumer_ptr == NULL); + m_consumer_ptr = consumer_ptr; + } + + void setClockObj(ClockedObject* obj) + { + assert(m_clockobj_ptr == NULL); + m_clockobj_ptr = obj; + } + + void + setDescription(const std::string& name) + { + m_name = name; + } + + bool isReady() const; + const Address& readyAddress() const; + bool isSet(const Address& address) const { return !!m_map.count(address); } + void set(const Address& address, Cycles relative_latency); + void set(const Address& address, uint64_t relative_latency) + { set(address, Cycles(relative_latency)); } + + void unset(const Address& address); + void print(std::ostream& out) const; + + private: + void updateNext() const; + + // Private copy constructor and assignment operator + TimerTable(const TimerTable& obj); + TimerTable& operator=(const TimerTable& obj); + + // Data Members (m_prefix) + + // use a std::map for the address map as this container is sorted + // and ensures a well-defined iteration order + typedef std::map AddressMap; + AddressMap m_map; + mutable bool m_next_valid; + mutable Cycles m_next_time; // Only valid if m_next_valid is true + mutable Address m_next_address; // Only valid if m_next_valid is true + + //! Object used for querying time. + ClockedObject* m_clockobj_ptr; + //! Consumer to signal a wakeup() + Consumer* m_consumer_ptr; + + std::string m_name; +}; + +inline std::ostream& +operator<<(std::ostream& out, const TimerTable& obj) +{ + obj.print(out); + out << std::flush; + return out; +} + +#endif // __MEM_RUBY_SYSTEM_TIMERTABLE_HH__ diff --git a/src/mem/ruby/structures/WireBuffer.cc b/src/mem/ruby/structures/WireBuffer.cc new file mode 100644 index 000000000..702a53f16 --- /dev/null +++ b/src/mem/ruby/structures/WireBuffer.cc @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2010 Advanced Micro Devices, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Lisa Hsu + * + */ + +#include +#include + +#include "base/cprintf.hh" +#include "base/stl_helpers.hh" +#include "mem/ruby/common/Global.hh" +#include "mem/ruby/structures/WireBuffer.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; + +// Output operator definition + +ostream& +operator<<(ostream& out, const WireBuffer& obj) +{ + obj.print(out); + out << flush; + return out; +} + + +// **************************************************************** + +// CONSTRUCTOR +WireBuffer::WireBuffer(const Params *p) + : SimObject(p) +{ + m_msg_counter = 0; +} + +void +WireBuffer::init() +{ +} + +WireBuffer::~WireBuffer() +{ +} + +void +WireBuffer::enqueue(MsgPtr message, Cycles latency) +{ + m_msg_counter++; + Cycles current_time = g_system_ptr->curCycle(); + Cycles arrival_time = current_time + latency; + assert(arrival_time > current_time); + + MessageBufferNode thisNode(arrival_time, m_msg_counter, message); + m_message_queue.push_back(thisNode); + if (m_consumer_ptr != NULL) { + m_consumer_ptr-> + scheduleEventAbsolute(g_system_ptr->clockPeriod() * arrival_time); + } else { + panic("No Consumer for WireBuffer! %s\n", *this); + } +} + +void +WireBuffer::dequeue() +{ + assert(isReady()); + pop_heap(m_message_queue.begin(), m_message_queue.end(), + greater()); + m_message_queue.pop_back(); +} + +const Message* +WireBuffer::peek() +{ + MessageBufferNode node = peekNode(); + Message* msg_ptr = node.m_msgptr.get(); + assert(msg_ptr != NULL); + return msg_ptr; +} + +MessageBufferNode +WireBuffer::peekNode() +{ + assert(isReady()); + MessageBufferNode req = m_message_queue.front(); + return req; +} + +void +WireBuffer::recycle() +{ + // Because you don't want anything reordered, make sure the recycle latency + // is just 1 cycle. As a result, you really want to use this only in + // Wire-like situations because you don't want to deadlock as a result of + // being stuck behind something if you're not actually supposed to. 
+ assert(isReady()); + MessageBufferNode node = m_message_queue.front(); + pop_heap(m_message_queue.begin(), m_message_queue.end(), + greater()); + + node.m_time = g_system_ptr->curCycle() + Cycles(1); + m_message_queue.back() = node; + push_heap(m_message_queue.begin(), m_message_queue.end(), + greater()); + m_consumer_ptr-> + scheduleEventAbsolute(g_system_ptr->clockPeriod() * node.m_time); +} + +bool +WireBuffer::isReady() +{ + return ((!m_message_queue.empty()) && + (m_message_queue.front().m_time <= g_system_ptr->curCycle())); +} + +void +WireBuffer::print(ostream& out) const +{ +} + +void +WireBuffer::wakeup() +{ +} + +WireBuffer * +RubyWireBufferParams::create() +{ + return new WireBuffer(this); +} + diff --git a/src/mem/ruby/structures/WireBuffer.hh b/src/mem/ruby/structures/WireBuffer.hh new file mode 100644 index 000000000..6dee01ae0 --- /dev/null +++ b/src/mem/ruby/structures/WireBuffer.hh @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2010 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Lisa Hsu + * + */ + +#ifndef __MEM_RUBY_SYSTEM_WIREBUFFER_HH__ +#define __MEM_RUBY_SYSTEM_WIREBUFFER_HH__ + +#include +#include +#include + +#include "mem/ruby/common/Consumer.hh" +#include "mem/ruby/network/MessageBufferNode.hh" +#include "params/RubyWireBuffer.hh" +#include "sim/sim_object.hh" + +////////////////////////////////////////////////////////////////////////////// +// This object was written to literally mimic a Wire in Ruby, in the sense +// that there is no way for messages to get reordered en route on the WireBuffer. +// With Message Buffers, even if randomization is off and ordered is on, +// messages can arrive in different orders than they were sent because of +// network issues. This mimics a Wire, such that that is not possible. This can +// allow for messages between closely coupled controllers that are not actually +// separated by a network in real systems to simplify coherence. 
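The WireBuffer described above keeps pending messages in a vector treated as a min-heap on arrival cycle, and recycle() re-enqueues the head exactly one cycle later so in-flight order cannot change. Below is a small standalone sketch of that queueing discipline; it calls push_heap on every enqueue (the code in this patch relies instead on arrival times being non-decreasing), and Node, SimpleWireBuffer and their members are illustrative names only.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <vector>

    // One in-flight message: ordered by arrival cycle, ties broken by
    // enqueue order so the queue behaves like a wire.
    struct Node
    {
        uint64_t time;   // arrival cycle
        uint64_t seq;    // enqueue order
        int payload;

        bool operator>(const Node& o) const
        { return time != o.time ? time > o.time : seq > o.seq; }
    };

    class SimpleWireBuffer
    {
      public:
        void enqueue(int payload, uint64_t now, uint64_t latency)
        {
            m_queue.push_back({now + latency, m_counter++, payload});
            std::push_heap(m_queue.begin(), m_queue.end(),
                           std::greater<Node>());
        }

        bool isReady(uint64_t now) const
        { return !m_queue.empty() && m_queue.front().time <= now; }

        int dequeue()
        {
            assert(!m_queue.empty());
            // pop_heap moves the earliest node to the back, then drop it.
            std::pop_heap(m_queue.begin(), m_queue.end(),
                          std::greater<Node>());
            int p = m_queue.back().payload;
            m_queue.pop_back();
            return p;
        }

        void recycle(uint64_t now)
        {
            assert(!m_queue.empty());
            // Re-enqueue the head exactly one cycle later; the 1-cycle
            // recycle latency is what keeps the wire order intact.
            std::pop_heap(m_queue.begin(), m_queue.end(),
                          std::greater<Node>());
            m_queue.back().time = now + 1;
            std::push_heap(m_queue.begin(), m_queue.end(),
                           std::greater<Node>());
        }

      private:
        std::vector<Node> m_queue;
        uint64_t m_counter = 0;
    };

    int main()
    {
        SimpleWireBuffer wb;
        wb.enqueue(1, /*now=*/0, /*latency=*/3);
        wb.enqueue(2, 0, 3);
        for (uint64_t cycle = 0; cycle < 5; ++cycle)
            while (wb.isReady(cycle))
                std::cout << "cycle " << cycle << ": msg "
                          << wb.dequeue() << "\n";
        return 0;
    }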
+///////////////////////////////////////////////////////////////////////////// + +class Message; + +class WireBuffer : public SimObject +{ + public: + typedef RubyWireBufferParams Params; + WireBuffer(const Params *p); + void init(); + + ~WireBuffer(); + + void wakeup(); + + void setConsumer(Consumer* consumer_ptr) + { + m_consumer_ptr = consumer_ptr; + } + Consumer* getConsumer() { return m_consumer_ptr; }; + void setDescription(const std::string& name) { m_description = name; }; + std::string getDescription() { return m_description; }; + + void enqueue(MsgPtr message, Cycles latency); + void dequeue(); + const Message* peek(); + MessageBufferNode peekNode(); + void recycle(); + bool isReady(); + bool areNSlotsAvailable(int n) { return true; }; // infinite queue length + + void print(std::ostream& out) const; + uint64_t m_msg_counter; + + private: + // Private copy constructor and assignment operator + WireBuffer (const WireBuffer& obj); + WireBuffer& operator=(const WireBuffer& obj); + + // data members + Consumer* m_consumer_ptr; // Consumer to signal a wakeup() + std::string m_description; + + // queues where memory requests live + std::vector m_message_queue; + +}; + +std::ostream& operator<<(std::ostream& out, const WireBuffer& obj); + +#endif // __MEM_RUBY_SYSTEM_WireBuffer_HH__ diff --git a/src/mem/ruby/structures/WireBuffer.py b/src/mem/ruby/structures/WireBuffer.py new file mode 100644 index 000000000..441947adf --- /dev/null +++ b/src/mem/ruby/structures/WireBuffer.py @@ -0,0 +1,35 @@ +# Copyright (c) 2010 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# Author: Lisa Hsu + +from m5.params import * +from m5.SimObject import SimObject + +class RubyWireBuffer(SimObject): + type = 'RubyWireBuffer' + cxx_class = 'WireBuffer' + cxx_header = "mem/ruby/structures/WireBuffer.hh" diff --git a/src/mem/ruby/system/AbstractReplacementPolicy.hh b/src/mem/ruby/system/AbstractReplacementPolicy.hh deleted file mode 100644 index 3c492377e..000000000 --- a/src/mem/ruby/system/AbstractReplacementPolicy.hh +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2007 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__ -#define __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__ - -#include "base/types.hh" - -class AbstractReplacementPolicy -{ - public: - AbstractReplacementPolicy(Index num_sets, Index assoc); - virtual ~AbstractReplacementPolicy(); - - /* touch a block. a.k.a. 
update timestamp */ - virtual void touch(Index set, Index way, Tick time) = 0; - - /* returns the way to replace */ - virtual Index getVictim(Index set) const = 0; - - /* get the time of the last access */ - Tick getLastAccess(Index set, Index way); - - protected: - unsigned m_num_sets; /** total number of sets */ - unsigned m_assoc; /** set associativity */ - Tick **m_last_ref_ptr; /** timestamp of last reference */ -}; - -inline -AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets, - Index assoc) -{ - m_num_sets = num_sets; - m_assoc = assoc; - m_last_ref_ptr = new Tick*[m_num_sets]; - for(unsigned i = 0; i < m_num_sets; i++){ - m_last_ref_ptr[i] = new Tick[m_assoc]; - for(unsigned j = 0; j < m_assoc; j++){ - m_last_ref_ptr[i][j] = 0; - } - } -} - -inline -AbstractReplacementPolicy::~AbstractReplacementPolicy() -{ - if (m_last_ref_ptr != NULL){ - for (unsigned i = 0; i < m_num_sets; i++){ - if (m_last_ref_ptr[i] != NULL){ - delete[] m_last_ref_ptr[i]; - } - } - delete[] m_last_ref_ptr; - } -} - -inline Tick -AbstractReplacementPolicy::getLastAccess(Index set, Index way) -{ - return m_last_ref_ptr[set][way]; -} - -#endif // __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__ diff --git a/src/mem/ruby/system/BankedArray.cc b/src/mem/ruby/system/BankedArray.cc deleted file mode 100644 index df7852a0e..000000000 --- a/src/mem/ruby/system/BankedArray.cc +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2012 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
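The replacement-policy interface shown above keeps a timestamp per (set, way), refreshed by touch() and consulted by getVictim(). The following compact sketch pairs that interface with a least-recently-used victim picker, which the CacheMemory code in this patch constructs when the policy string is "LRU"; std::vector replaces the raw Tick arrays, and AbstractPolicy and LRU are illustrative names rather than the classes in the tree.

    #include <cstdint>
    #include <vector>

    using Tick = uint64_t;

    // Minimal model of the replacement-policy interface: one timestamp
    // per (set, way), refreshed on every touch.
    class AbstractPolicy
    {
      public:
        AbstractPolicy(unsigned num_sets, unsigned assoc)
            : m_assoc(assoc),
              m_last_ref(num_sets, std::vector<Tick>(assoc, 0)) {}
        virtual ~AbstractPolicy() = default;

        virtual void touch(unsigned set, unsigned way, Tick now)
        { m_last_ref[set][way] = now; }

        // Pick the way to evict in the given set.
        virtual unsigned getVictim(unsigned set) const = 0;

        Tick getLastAccess(unsigned set, unsigned way) const
        { return m_last_ref[set][way]; }

      protected:
        unsigned m_assoc;
        std::vector<std::vector<Tick>> m_last_ref;
    };

    // LRU: evict the way with the oldest timestamp.
    class LRU : public AbstractPolicy
    {
      public:
        using AbstractPolicy::AbstractPolicy;

        unsigned getVictim(unsigned set) const override
        {
            unsigned victim = 0;
            for (unsigned way = 1; way < m_assoc; ++way)
                if (m_last_ref[set][way] < m_last_ref[set][victim])
                    victim = way;
            return victim;
        }
    };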
- * - * Author: Brad Beckmann - * - */ - -#include "base/intmath.hh" -#include "mem/ruby/system/BankedArray.hh" -#include "mem/ruby/system/System.hh" - -BankedArray::BankedArray(unsigned int banks, Cycles accessLatency, - unsigned int startIndexBit) -{ - this->banks = banks; - this->accessLatency = accessLatency; - this->startIndexBit = startIndexBit; - - if (banks != 0) { - bankBits = floorLog2(banks); - } - - busyBanks.resize(banks); -} - -bool -BankedArray::tryAccess(Index idx) -{ - if (accessLatency == 0) - return true; - - unsigned int bank = mapIndexToBank(idx); - assert(bank < banks); - - if (busyBanks[bank].endAccess >= curTick()) { - if (!(busyBanks[bank].startAccess == curTick() && - busyBanks[bank].idx == idx)) { - return false; - } else { - // We tried to allocate resources twice - // in the same cycle for the same addr - return true; - } - } - - busyBanks[bank].idx = idx; - busyBanks[bank].startAccess = curTick(); - busyBanks[bank].endAccess = curTick() + - (accessLatency-1) * g_system_ptr->clockPeriod(); - - return true; -} - -unsigned int -BankedArray::mapIndexToBank(Index idx) -{ - if (banks == 1) { - return 0; - } - return idx % banks; -} diff --git a/src/mem/ruby/system/BankedArray.hh b/src/mem/ruby/system/BankedArray.hh deleted file mode 100644 index 89007befa..000000000 --- a/src/mem/ruby/system/BankedArray.hh +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2012 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * Author: Brad Beckmann - * - */ - -#ifndef __MEM_RUBY_SYSTEM_BANKEDARRAY_HH__ -#define __MEM_RUBY_SYSTEM_BANKEDARRAY_HH__ - -#include - -#include "mem/ruby/common/TypeDefines.hh" -#include "sim/core.hh" - -class BankedArray -{ - private: - unsigned int banks; - Cycles accessLatency; - unsigned int bankBits; - unsigned int startIndexBit; - - class AccessRecord - { - public: - AccessRecord() : idx(0), startAccess(0), endAccess(0) {} - Index idx; - Tick startAccess; - Tick endAccess; - }; - - // If the tick event is scheduled then the bank is busy - // otherwise, schedule the event and wait for it to complete - std::vector busyBanks; - - unsigned int mapIndexToBank(Index idx); - - public: - BankedArray(unsigned int banks, Cycles accessLatency, unsigned int startIndexBit); - - // Note: We try the access based on the cache index, not the address - // This is so we don't get aliasing on blocks being replaced - bool tryAccess(Index idx); - -}; - -#endif diff --git a/src/mem/ruby/system/Cache.py b/src/mem/ruby/system/Cache.py deleted file mode 100644 index d4af1320a..000000000 --- a/src/mem/ruby/system/Cache.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2009 Advanced Micro Devices, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
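BankedArray::tryAccess, moved by this patch, models bank contention: an index picks a bank by modulo, a bank stays busy until its access completes, and a second attempt in the same cycle for the same index counts as the access already granted. Below is a standalone sketch of that check with plain tick arithmetic in place of gem5's Cycles and clock-period plumbing; SimpleBankedArray is an illustrative name.

    #include <cstdint>
    #include <vector>

    // Standalone model of bank-conflict checking for a banked SRAM array.
    class SimpleBankedArray
    {
      public:
        SimpleBankedArray(unsigned banks, uint64_t access_ticks)
            : m_banks(banks), m_access_ticks(access_ticks), m_busy(banks) {}

        bool tryAccess(uint64_t idx, uint64_t now)
        {
            if (m_access_ticks == 0)
                return true;      // a zero-latency array never conflicts

            Busy& b = m_busy[idx % m_banks];
            if (b.end >= now) {
                // Bank still busy: allow only the access already granted
                // this cycle for this same index.
                return b.start == now && b.idx == idx;
            }
            b.idx = idx;
            b.start = now;
            b.end = now + m_access_ticks - 1;
            return true;
        }

      private:
        struct Busy { uint64_t idx = 0, start = 0, end = 0; };
        unsigned m_banks;
        uint64_t m_access_ticks;
        std::vector<Busy> m_busy;
    };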
-# -# Authors: Steve Reinhardt -# Brad Beckmann - -from m5.params import * -from m5.SimObject import SimObject -from Controller import RubyController - -class RubyCache(SimObject): - type = 'RubyCache' - cxx_class = 'CacheMemory' - cxx_header = "mem/ruby/system/CacheMemory.hh" - size = Param.MemorySize("capacity in bytes"); - latency = Param.Cycles(""); - assoc = Param.Int(""); - replacement_policy = Param.String("PSEUDO_LRU", ""); - start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line"); - is_icache = Param.Bool(False, "is instruction only cache"); - - dataArrayBanks = Param.Int(1, "Number of banks for the data array") - tagArrayBanks = Param.Int(1, "Number of banks for the tag array") - dataAccessLatency = Param.Cycles(1, "cycles for a data array access") - tagAccessLatency = Param.Cycles(1, "cycles for a tag array access") - resourceStalls = Param.Bool(False, "stall if there is a resource failure") diff --git a/src/mem/ruby/system/CacheMemory.cc b/src/mem/ruby/system/CacheMemory.cc deleted file mode 100644 index 2ea6942ff..000000000 --- a/src/mem/ruby/system/CacheMemory.cc +++ /dev/null @@ -1,565 +0,0 @@ -/* - * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "base/intmath.hh" -#include "debug/RubyCache.hh" -#include "debug/RubyCacheTrace.hh" -#include "debug/RubyResourceStalls.hh" -#include "debug/RubyStats.hh" -#include "mem/protocol/AccessPermission.hh" -#include "mem/ruby/system/CacheMemory.hh" -#include "mem/ruby/system/System.hh" - -using namespace std; - -ostream& -operator<<(ostream& out, const CacheMemory& obj) -{ - obj.print(out); - out << flush; - return out; -} - -CacheMemory * -RubyCacheParams::create() -{ - return new CacheMemory(this); -} - -CacheMemory::CacheMemory(const Params *p) - : SimObject(p), - dataArray(p->dataArrayBanks, p->dataAccessLatency, p->start_index_bit), - tagArray(p->tagArrayBanks, p->tagAccessLatency, p->start_index_bit) -{ - m_cache_size = p->size; - m_latency = p->latency; - m_cache_assoc = p->assoc; - m_policy = p->replacement_policy; - m_start_index_bit = p->start_index_bit; - m_is_instruction_only_cache = p->is_icache; - m_resource_stalls = p->resourceStalls; -} - -void -CacheMemory::init() -{ - m_cache_num_sets = (m_cache_size / m_cache_assoc) / - RubySystem::getBlockSizeBytes(); - assert(m_cache_num_sets > 1); - m_cache_num_set_bits = floorLog2(m_cache_num_sets); - assert(m_cache_num_set_bits > 0); - - if (m_policy == "PSEUDO_LRU") - m_replacementPolicy_ptr = - new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc); - else if (m_policy == "LRU") - m_replacementPolicy_ptr = - new LRUPolicy(m_cache_num_sets, m_cache_assoc); - else - assert(false); - - m_cache.resize(m_cache_num_sets); - for (int i = 0; i < m_cache_num_sets; i++) { - m_cache[i].resize(m_cache_assoc); - for (int j = 0; j < m_cache_assoc; j++) { - m_cache[i][j] = NULL; - } - } -} - -CacheMemory::~CacheMemory() -{ - if (m_replacementPolicy_ptr != NULL) - delete m_replacementPolicy_ptr; - for (int i = 0; i < m_cache_num_sets; i++) { - for (int j = 0; j < m_cache_assoc; j++) { - delete m_cache[i][j]; - } - } -} - -// convert a Address to its location in the cache -Index -CacheMemory::addressToCacheSet(const Address& address) const -{ - assert(address == line_address(address)); - return address.bitSelect(m_start_index_bit, - m_start_index_bit + m_cache_num_set_bits - 1); -} - -// Given a cache index: returns the index of the tag in a set. -// returns -1 if the tag is not found. -int -CacheMemory::findTagInSet(Index cacheSet, const Address& tag) const -{ - assert(tag == line_address(tag)); - // search the set for the tags - m5::hash_map::const_iterator it = m_tag_index.find(tag); - if (it != m_tag_index.end()) - if (m_cache[cacheSet][it->second]->m_Permission != - AccessPermission_NotPresent) - return it->second; - return -1; // Not found -} - -// Given a cache index: returns the index of the tag in a set. -// returns -1 if the tag is not found. -int -CacheMemory::findTagInSetIgnorePermissions(Index cacheSet, - const Address& tag) const -{ - assert(tag == line_address(tag)); - // search the set for the tags - m5::hash_map::const_iterator it = m_tag_index.find(tag); - if (it != m_tag_index.end()) - return it->second; - return -1; // Not found -} - -bool -CacheMemory::tryCacheAccess(const Address& address, RubyRequestType type, - DataBlock*& data_ptr) -{ - assert(address == line_address(address)); - DPRINTF(RubyCache, "address: %s\n", address); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - if (loc != -1) { - // Do we even have a tag match? 
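The lookup path in this file derives the set from address bits above the line offset, finds the way through a tag-to-way hash map, and honours a hit only if the entry's permission matches the request type. The following condensed standalone sketch shows that flow plus the first-free-way allocation, using simplified enums in place of the generated AccessPermission and RubyRequestType types; SimpleCache and its members are illustrative names, and the set count is assumed to be a power of two.

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    enum class Perm { NotPresent, Invalid, ReadOnly, ReadWrite };
    enum class Req  { Load, Store, Ifetch };

    struct Entry { uint64_t addr = 0; Perm perm = Perm::NotPresent; };

    // Minimal set-associative lookup: set index from address bits, way
    // from a tag->way map, permission check against the request type.
    class SimpleCache
    {
      public:
        SimpleCache(unsigned sets, unsigned assoc, unsigned start_bit)
            : m_sets(sets), m_assoc(assoc), m_start_bit(start_bit),
              m_lines(sets, std::vector<Entry>(assoc)) {}

        unsigned addressToSet(uint64_t addr) const
        { return static_cast<unsigned>((addr >> m_start_bit) & (m_sets - 1)); }

        int findWay(unsigned set, uint64_t addr) const
        {
            auto it = m_tag_index.find(addr);
            if (it != m_tag_index.end() &&
                m_lines[set][it->second].perm != Perm::NotPresent)
                return it->second;
            return -1;                               // not found
        }

        bool tryAccess(uint64_t addr, Req type) const
        {
            unsigned set = addressToSet(addr);
            int way = findWay(set, addr);
            if (way < 0)
                return false;
            Perm p = m_lines[set][way].perm;
            // Writes need ReadWrite; loads and ifetches accept ReadOnly.
            return p == Perm::ReadWrite ||
                   (p == Perm::ReadOnly && type != Req::Store);
        }

        bool allocate(uint64_t addr)
        {
            unsigned set = addressToSet(addr);
            for (unsigned way = 0; way < m_assoc; ++way) {
                if (m_lines[set][way].perm == Perm::NotPresent) {
                    m_lines[set][way] = {addr, Perm::Invalid};
                    m_tag_index[addr] = static_cast<int>(way);
                    return true;
                }
            }
            return false;   // caller must evict a victim first
        }

      private:
        unsigned m_sets, m_assoc, m_start_bit;
        std::vector<std::vector<Entry>> m_lines;
        std::unordered_map<uint64_t, int> m_tag_index; // line addr -> way
    };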
- AbstractCacheEntry* entry = m_cache[cacheSet][loc]; - m_replacementPolicy_ptr->touch(cacheSet, loc, curTick()); - data_ptr = &(entry->getDataBlk()); - - if (entry->m_Permission == AccessPermission_Read_Write) { - return true; - } - if ((entry->m_Permission == AccessPermission_Read_Only) && - (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) { - return true; - } - // The line must not be accessible - } - data_ptr = NULL; - return false; -} - -bool -CacheMemory::testCacheAccess(const Address& address, RubyRequestType type, - DataBlock*& data_ptr) -{ - assert(address == line_address(address)); - DPRINTF(RubyCache, "address: %s\n", address); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - - if (loc != -1) { - // Do we even have a tag match? - AbstractCacheEntry* entry = m_cache[cacheSet][loc]; - m_replacementPolicy_ptr->touch(cacheSet, loc, curTick()); - data_ptr = &(entry->getDataBlk()); - - return m_cache[cacheSet][loc]->m_Permission != - AccessPermission_NotPresent; - } - - data_ptr = NULL; - return false; -} - -// tests to see if an address is present in the cache -bool -CacheMemory::isTagPresent(const Address& address) const -{ - assert(address == line_address(address)); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - - if (loc == -1) { - // We didn't find the tag - DPRINTF(RubyCache, "No tag match for address: %s\n", address); - return false; - } - DPRINTF(RubyCache, "address: %s found\n", address); - return true; -} - -// Returns true if there is: -// a) a tag match on this address or there is -// b) an unused line in the same cache "way" -bool -CacheMemory::cacheAvail(const Address& address) const -{ - assert(address == line_address(address)); - - Index cacheSet = addressToCacheSet(address); - - for (int i = 0; i < m_cache_assoc; i++) { - AbstractCacheEntry* entry = m_cache[cacheSet][i]; - if (entry != NULL) { - if (entry->m_Address == address || - entry->m_Permission == AccessPermission_NotPresent) { - // Already in the cache or we found an empty entry - return true; - } - } else { - return true; - } - } - return false; -} - -AbstractCacheEntry* -CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry) -{ - assert(address == line_address(address)); - assert(!isTagPresent(address)); - assert(cacheAvail(address)); - DPRINTF(RubyCache, "address: %s\n", address); - - // Find the first open slot - Index cacheSet = addressToCacheSet(address); - std::vector &set = m_cache[cacheSet]; - for (int i = 0; i < m_cache_assoc; i++) { - if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) { - set[i] = entry; // Init entry - set[i]->m_Address = address; - set[i]->m_Permission = AccessPermission_Invalid; - DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n", - address); - set[i]->m_locked = -1; - m_tag_index[address] = i; - - m_replacementPolicy_ptr->touch(cacheSet, i, curTick()); - - return entry; - } - } - panic("Allocate didn't find an available entry"); -} - -void -CacheMemory::deallocate(const Address& address) -{ - assert(address == line_address(address)); - assert(isTagPresent(address)); - DPRINTF(RubyCache, "address: %s\n", address); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - if (loc != -1) { - delete m_cache[cacheSet][loc]; - m_cache[cacheSet][loc] = NULL; - m_tag_index.erase(address); - } -} - -// Returns with the physical address of the conflicting cache line -Address -CacheMemory::cacheProbe(const 
Address& address) const -{ - assert(address == line_address(address)); - assert(!cacheAvail(address)); - - Index cacheSet = addressToCacheSet(address); - return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]-> - m_Address; -} - -// looks an address up in the cache -AbstractCacheEntry* -CacheMemory::lookup(const Address& address) -{ - assert(address == line_address(address)); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - if(loc == -1) return NULL; - return m_cache[cacheSet][loc]; -} - -// looks an address up in the cache -const AbstractCacheEntry* -CacheMemory::lookup(const Address& address) const -{ - assert(address == line_address(address)); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - if(loc == -1) return NULL; - return m_cache[cacheSet][loc]; -} - -// Sets the most recently used bit for a cache block -void -CacheMemory::setMRU(const Address& address) -{ - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - - if(loc != -1) - m_replacementPolicy_ptr->touch(cacheSet, loc, curTick()); -} - -void -CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const -{ - uint64 warmedUpBlocks = 0; - uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets - * (uint64)m_cache_assoc; - - for (int i = 0; i < m_cache_num_sets; i++) { - for (int j = 0; j < m_cache_assoc; j++) { - if (m_cache[i][j] != NULL) { - AccessPermission perm = m_cache[i][j]->m_Permission; - RubyRequestType request_type = RubyRequestType_NULL; - if (perm == AccessPermission_Read_Only) { - if (m_is_instruction_only_cache) { - request_type = RubyRequestType_IFETCH; - } else { - request_type = RubyRequestType_LD; - } - } else if (perm == AccessPermission_Read_Write) { - request_type = RubyRequestType_ST; - } - - if (request_type != RubyRequestType_NULL) { - tr->addRecord(cntrl, m_cache[i][j]->m_Address.getAddress(), - 0, request_type, - m_replacementPolicy_ptr->getLastAccess(i, j), - m_cache[i][j]->getDataBlk()); - warmedUpBlocks++; - } - } - } - } - - DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks" - "recorded %.2f%% \n", name().c_str(), warmedUpBlocks, - (uint64)m_cache_num_sets * (uint64)m_cache_assoc, - (float(warmedUpBlocks)/float(totalBlocks))*100.0); -} - -void -CacheMemory::print(ostream& out) const -{ - out << "Cache dump: " << name() << endl; - for (int i = 0; i < m_cache_num_sets; i++) { - for (int j = 0; j < m_cache_assoc; j++) { - if (m_cache[i][j] != NULL) { - out << " Index: " << i - << " way: " << j - << " entry: " << *m_cache[i][j] << endl; - } else { - out << " Index: " << i - << " way: " << j - << " entry: NULL" << endl; - } - } - } -} - -void -CacheMemory::printData(ostream& out) const -{ - out << "printData() not supported" << endl; -} - -void -CacheMemory::setLocked(const Address& address, int context) -{ - DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context); - assert(address == line_address(address)); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - assert(loc != -1); - m_cache[cacheSet][loc]->m_locked = context; -} - -void -CacheMemory::clearLocked(const Address& address) -{ - DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address); - assert(address == line_address(address)); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - assert(loc != -1); - m_cache[cacheSet][loc]->m_locked = -1; -} - -bool -CacheMemory::isLocked(const 
Address& address, int context) -{ - assert(address == line_address(address)); - Index cacheSet = addressToCacheSet(address); - int loc = findTagInSet(cacheSet, address); - assert(loc != -1); - DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n", - address, m_cache[cacheSet][loc]->m_locked, context); - return m_cache[cacheSet][loc]->m_locked == context; -} - -void -CacheMemory::regStats() -{ - m_demand_hits - .name(name() + ".demand_hits") - .desc("Number of cache demand hits") - ; - - m_demand_misses - .name(name() + ".demand_misses") - .desc("Number of cache demand misses") - ; - - m_demand_accesses - .name(name() + ".demand_accesses") - .desc("Number of cache demand accesses") - ; - - m_demand_accesses = m_demand_hits + m_demand_misses; - - m_sw_prefetches - .name(name() + ".total_sw_prefetches") - .desc("Number of software prefetches") - .flags(Stats::nozero) - ; - - m_hw_prefetches - .name(name() + ".total_hw_prefetches") - .desc("Number of hardware prefetches") - .flags(Stats::nozero) - ; - - m_prefetches - .name(name() + ".total_prefetches") - .desc("Number of prefetches") - .flags(Stats::nozero) - ; - - m_prefetches = m_sw_prefetches + m_hw_prefetches; - - m_accessModeType - .init(RubyRequestType_NUM) - .name(name() + ".access_mode") - .flags(Stats::pdf | Stats::total) - ; - for (int i = 0; i < RubyAccessMode_NUM; i++) { - m_accessModeType - .subname(i, RubyAccessMode_to_string(RubyAccessMode(i))) - .flags(Stats::nozero) - ; - } - - numDataArrayReads - .name(name() + ".num_data_array_reads") - .desc("number of data array reads") - .flags(Stats::nozero) - ; - - numDataArrayWrites - .name(name() + ".num_data_array_writes") - .desc("number of data array writes") - .flags(Stats::nozero) - ; - - numTagArrayReads - .name(name() + ".num_tag_array_reads") - .desc("number of tag array reads") - .flags(Stats::nozero) - ; - - numTagArrayWrites - .name(name() + ".num_tag_array_writes") - .desc("number of tag array writes") - .flags(Stats::nozero) - ; - - numTagArrayStalls - .name(name() + ".num_tag_array_stalls") - .desc("number of stalls caused by tag array") - .flags(Stats::nozero) - ; - - numDataArrayStalls - .name(name() + ".num_data_array_stalls") - .desc("number of stalls caused by data array") - .flags(Stats::nozero) - ; -} - -void -CacheMemory::recordRequestType(CacheRequestType requestType) -{ - DPRINTF(RubyStats, "Recorded statistic: %s\n", - CacheRequestType_to_string(requestType)); - switch(requestType) { - case CacheRequestType_DataArrayRead: - numDataArrayReads++; - return; - case CacheRequestType_DataArrayWrite: - numDataArrayWrites++; - return; - case CacheRequestType_TagArrayRead: - numTagArrayReads++; - return; - case CacheRequestType_TagArrayWrite: - numTagArrayWrites++; - return; - default: - warn("CacheMemory access_type not found: %s", - CacheRequestType_to_string(requestType)); - } -} - -bool -CacheMemory::checkResourceAvailable(CacheResourceType res, Address addr) -{ - if (!m_resource_stalls) { - return true; - } - - if (res == CacheResourceType_TagArray) { - if (tagArray.tryAccess(addressToCacheSet(addr))) return true; - else { - DPRINTF(RubyResourceStalls, - "Tag array stall on addr %s in set %d\n", - addr, addressToCacheSet(addr)); - numTagArrayStalls++; - return false; - } - } else if (res == CacheResourceType_DataArray) { - if (dataArray.tryAccess(addressToCacheSet(addr))) return true; - else { - DPRINTF(RubyResourceStalls, - "Data array stall on addr %s in set %d\n", - addr, addressToCacheSet(addr)); - numDataArrayStalls++; - return false; - } - } else 
{ - assert(false); - return true; - } -} diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh deleted file mode 100644 index aa619e59d..000000000 --- a/src/mem/ruby/system/CacheMemory.hh +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__ -#define __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__ - -#include -#include - -#include "base/hashmap.hh" -#include "base/statistics.hh" -#include "mem/protocol/CacheResourceType.hh" -#include "mem/protocol/CacheRequestType.hh" -#include "mem/protocol/RubyRequest.hh" -#include "mem/ruby/common/DataBlock.hh" -#include "mem/ruby/recorder/CacheRecorder.hh" -#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh" -#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh" -#include "mem/ruby/system/BankedArray.hh" -#include "mem/ruby/system/LRUPolicy.hh" -#include "mem/ruby/system/PseudoLRUPolicy.hh" -#include "params/RubyCache.hh" -#include "sim/sim_object.hh" - -class CacheMemory : public SimObject -{ - public: - typedef RubyCacheParams Params; - CacheMemory(const Params *p); - ~CacheMemory(); - - void init(); - - // Public Methods - // perform a cache access and see if we hit or not. Return true on a hit. 
- bool tryCacheAccess(const Address& address, RubyRequestType type, - DataBlock*& data_ptr); - - // similar to above, but doesn't require full access check - bool testCacheAccess(const Address& address, RubyRequestType type, - DataBlock*& data_ptr); - - // tests to see if an address is present in the cache - bool isTagPresent(const Address& address) const; - - // Returns true if there is: - // a) a tag match on this address or there is - // b) an unused line in the same cache "way" - bool cacheAvail(const Address& address) const; - - // find an unused entry and sets the tag appropriate for the address - AbstractCacheEntry* allocate(const Address& address, AbstractCacheEntry* new_entry); - void allocateVoid(const Address& address, AbstractCacheEntry* new_entry) - { - allocate(address, new_entry); - } - - // Explicitly free up this address - void deallocate(const Address& address); - - // Returns with the physical address of the conflicting cache line - Address cacheProbe(const Address& address) const; - - // looks an address up in the cache - AbstractCacheEntry* lookup(const Address& address); - const AbstractCacheEntry* lookup(const Address& address) const; - - Cycles getLatency() const { return m_latency; } - - // Hook for checkpointing the contents of the cache - void recordCacheContents(int cntrl, CacheRecorder* tr) const; - - // Set this address to most recently used - void setMRU(const Address& address); - - void setLocked (const Address& addr, int context); - void clearLocked (const Address& addr); - bool isLocked (const Address& addr, int context); - - // Print cache contents - void print(std::ostream& out) const; - void printData(std::ostream& out) const; - - void regStats(); - bool checkResourceAvailable(CacheResourceType res, Address addr); - void recordRequestType(CacheRequestType requestType); - - public: - Stats::Scalar m_demand_hits; - Stats::Scalar m_demand_misses; - Stats::Formula m_demand_accesses; - - Stats::Scalar m_sw_prefetches; - Stats::Scalar m_hw_prefetches; - Stats::Formula m_prefetches; - - Stats::Vector m_accessModeType; - - Stats::Scalar numDataArrayReads; - Stats::Scalar numDataArrayWrites; - Stats::Scalar numTagArrayReads; - Stats::Scalar numTagArrayWrites; - - Stats::Scalar numTagArrayStalls; - Stats::Scalar numDataArrayStalls; - - private: - // convert a Address to its location in the cache - Index addressToCacheSet(const Address& address) const; - - // Given a cache tag: returns the index of the tag in a set. - // returns -1 if the tag is not found. - int findTagInSet(Index line, const Address& tag) const; - int findTagInSetIgnorePermissions(Index cacheSet, - const Address& tag) const; - - // Private copy constructor and assignment operator - CacheMemory(const CacheMemory& obj); - CacheMemory& operator=(const CacheMemory& obj); - - private: - Cycles m_latency; - - // Data Members (m_prefix) - bool m_is_instruction_only_cache; - - // The first index is the # of cache lines. - // The second index is the the amount associativity. 
- m5::hash_map m_tag_index; - std::vector > m_cache; - - AbstractReplacementPolicy *m_replacementPolicy_ptr; - - BankedArray dataArray; - BankedArray tagArray; - - int m_cache_size; - std::string m_policy; - int m_cache_num_sets; - int m_cache_num_set_bits; - int m_cache_assoc; - int m_start_index_bit; - bool m_resource_stalls; -}; - -std::ostream& operator<<(std::ostream& out, const CacheMemory& obj); - -#endif // __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__ diff --git a/src/mem/ruby/system/CacheRecorder.cc b/src/mem/ruby/system/CacheRecorder.cc new file mode 100644 index 000000000..3a76a64f7 --- /dev/null +++ b/src/mem/ruby/system/CacheRecorder.cc @@ -0,0 +1,195 @@ +/* + * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood + * Copyright (c) 2010 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "debug/RubyCacheTrace.hh" +#include "mem/ruby/system/CacheRecorder.hh" +#include "mem/ruby/system/Sequencer.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; + +void +TraceRecord::print(ostream& out) const +{ + out << "[TraceRecord: Node, " << m_cntrl_id << ", " + << m_data_address << ", " << m_pc_address << ", " + << m_type << ", Time: " << m_time << "]"; +} + +CacheRecorder::CacheRecorder() + : m_uncompressed_trace(NULL), + m_uncompressed_trace_size(0), + m_block_size_bytes(RubySystem::getBlockSizeBytes()) +{ +} + +CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace, + uint64_t uncompressed_trace_size, + std::vector& seq_map, + uint64_t block_size_bytes) + : m_uncompressed_trace(uncompressed_trace), + m_uncompressed_trace_size(uncompressed_trace_size), + m_seq_map(seq_map), m_bytes_read(0), m_records_read(0), + m_records_flushed(0), m_block_size_bytes(block_size_bytes) +{ + if (m_uncompressed_trace != NULL) { + if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) { + // Block sizes larger than when the trace was recorded are not + // supported, as we cannot reliably turn accesses to smaller blocks + // into larger ones. 
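In other words, a block recorded at a larger size can be replayed as several fetches of the current block size, but a smaller recorded block cannot be merged into a larger one. A tiny sketch of that split, with an illustrative function name and plain printing in place of building request packets:

    #include <cstdint>
    #include <iostream>

    // Replay one recorded block as a series of current-block-size fetches.
    // This only works when the recorded block size is at least the current
    // block size, which is why the opposite case is rejected at load time.
    void replayRecord(uint64_t data_addr, uint64_t recorded_block_size,
                      uint64_t current_block_size)
    {
        for (uint64_t off = 0; off < recorded_block_size;
             off += current_block_size) {
            std::cout << "fetch " << current_block_size << " bytes at 0x"
                      << std::hex << (data_addr + off) << std::dec << "\n";
        }
    }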
+ panic("Recorded cache block size (%d) < current block size (%d) !!", + m_block_size_bytes, RubySystem::getBlockSizeBytes()); + } + } +} + +CacheRecorder::~CacheRecorder() +{ + if (m_uncompressed_trace != NULL) { + delete [] m_uncompressed_trace; + m_uncompressed_trace = NULL; + } + m_seq_map.clear(); +} + +void +CacheRecorder::enqueueNextFlushRequest() +{ + if (m_records_flushed < m_records.size()) { + TraceRecord* rec = m_records[m_records_flushed]; + m_records_flushed++; + Request* req = new Request(rec->m_data_address, + m_block_size_bytes, 0, + Request::funcMasterId); + MemCmd::Command requestType = MemCmd::FlushReq; + Packet *pkt = new Packet(req, requestType); + + Sequencer* m_sequencer_ptr = m_seq_map[rec->m_cntrl_id]; + assert(m_sequencer_ptr != NULL); + m_sequencer_ptr->makeRequest(pkt); + + DPRINTF(RubyCacheTrace, "Flushing %s\n", *rec); + } +} + +void +CacheRecorder::enqueueNextFetchRequest() +{ + if (m_bytes_read < m_uncompressed_trace_size) { + TraceRecord* traceRecord = (TraceRecord*) (m_uncompressed_trace + + m_bytes_read); + + DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord); + + for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes; + rec_bytes_read += RubySystem::getBlockSizeBytes()) { + Request* req = new Request(); + MemCmd::Command requestType; + + if (traceRecord->m_type == RubyRequestType_LD) { + requestType = MemCmd::ReadReq; + req->setPhys(traceRecord->m_data_address + rec_bytes_read, + RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); + } else if (traceRecord->m_type == RubyRequestType_IFETCH) { + requestType = MemCmd::ReadReq; + req->setPhys(traceRecord->m_data_address + rec_bytes_read, + RubySystem::getBlockSizeBytes(), + Request::INST_FETCH, Request::funcMasterId); + } else { + requestType = MemCmd::WriteReq; + req->setPhys(traceRecord->m_data_address + rec_bytes_read, + RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); + } + + Packet *pkt = new Packet(req, requestType); + pkt->dataStatic(traceRecord->m_data + rec_bytes_read); + + Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id]; + assert(m_sequencer_ptr != NULL); + m_sequencer_ptr->makeRequest(pkt); + } + + m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes); + m_records_read++; + } +} + +void +CacheRecorder::addRecord(int cntrl, const physical_address_t data_addr, + const physical_address_t pc_addr, + RubyRequestType type, Time time, DataBlock& data) +{ + TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) + + m_block_size_bytes); + rec->m_cntrl_id = cntrl; + rec->m_time = time; + rec->m_data_address = data_addr; + rec->m_pc_address = pc_addr; + rec->m_type = type; + memcpy(rec->m_data, data.getData(0, m_block_size_bytes), + m_block_size_bytes); + + m_records.push_back(rec); +} + +uint64 +CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size) +{ + std::sort(m_records.begin(), m_records.end(), compareTraceRecords); + + int size = m_records.size(); + uint64 current_size = 0; + int record_size = sizeof(TraceRecord) + m_block_size_bytes; + + for (int i = 0; i < size; ++i) { + // Determine if we need to expand the buffer size + if (current_size + record_size > total_size) { + uint8_t* new_buf = new (nothrow) uint8_t[total_size * 2]; + if (new_buf == NULL) { + fatal("Unable to allocate buffer of size %s\n", + total_size * 2); + } + total_size = total_size * 2; + uint8_t* old_buf = *buf; + memcpy(new_buf, old_buf, current_size); + *buf = new_buf; + delete [] old_buf; + } + + // Copy the current record into the buffer + 
memcpy(&((*buf)[current_size]), m_records[i], record_size); + current_size += record_size; + + free(m_records[i]); + m_records[i] = NULL; + } + + m_records.clear(); + return current_size; +} diff --git a/src/mem/ruby/system/CacheRecorder.hh b/src/mem/ruby/system/CacheRecorder.hh new file mode 100644 index 000000000..2156b0689 --- /dev/null +++ b/src/mem/ruby/system/CacheRecorder.hh @@ -0,0 +1,130 @@ +/* + * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood + * Copyright (c) 2010 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Recording cache requests made to a ruby cache at certain ruby + * time. Also dump the requests to a gziped file. + */ + +#ifndef __MEM_RUBY_RECORDER_CACHERECORDER_HH__ +#define __MEM_RUBY_RECORDER_CACHERECORDER_HH__ + +#include + +#include "base/hashmap.hh" +#include "mem/protocol/RubyRequestType.hh" +#include "mem/ruby/common/Address.hh" +#include "mem/ruby/common/DataBlock.hh" +#include "mem/ruby/common/TypeDefines.hh" + +class Sequencer; + +/*! + * Class for recording cache contents. Note that the last element of the + * class is an array of length zero. It is used for creating variable + * length object, so that while writing the data to a file one does not + * need to copy the meta data and the actual data separately. + */ +class TraceRecord { + public: + int m_cntrl_id; + Time m_time; + physical_address_t m_data_address; + physical_address_t m_pc_address; + RubyRequestType m_type; + uint8_t m_data[0]; + + void print(std::ostream& out) const; +}; + +class CacheRecorder +{ + public: + CacheRecorder(); + ~CacheRecorder(); + + CacheRecorder(uint8_t* uncompressed_trace, + uint64_t uncompressed_trace_size, + std::vector& SequencerMap, + uint64_t block_size_bytes); + void addRecord(int cntrl, const physical_address_t data_addr, + const physical_address_t pc_addr, RubyRequestType type, + Time time, DataBlock& data); + + uint64 aggregateRecords(uint8_t** data, uint64 size); + + /*! + * Function for flushing the memory contents of the caches to the + * main memory. 
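aggregateRecords above packs variable-length records, each a fixed header followed inline by its data block, into one byte buffer, doubling the buffer whenever the next record would not fit. Below is a standalone sketch of that packing with a plain header struct in place of TraceRecord and the time-sorting left out; packRecords and RecordHeader are illustrative names, and the caller is assumed to pass a heap buffer of the stated size.

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // One recorded access: fixed header followed inline by its data bytes,
    // the same "header plus payload in one allocation" layout as TraceRecord.
    struct RecordHeader { int cntrl_id; uint64_t time; uint64_t data_addr; };

    // Pack headers and their data blocks back-to-back into *buf, doubling
    // the buffer whenever the next record would overflow it. Returns the
    // number of bytes written. *buf must point at total_size heap bytes.
    size_t
    packRecords(const std::vector<RecordHeader>& headers,
                const std::vector<std::vector<uint8_t>>& blocks,
                uint8_t** buf, size_t total_size)
    {
        assert(total_size > 0 && *buf != nullptr);
        size_t current = 0;
        for (size_t i = 0; i < headers.size(); ++i) {
            size_t rec_size = sizeof(RecordHeader) + blocks[i].size();
            while (current + rec_size > total_size) {
                // Grow by doubling and copy what has been packed so far.
                uint8_t* bigger = new uint8_t[total_size * 2];
                std::memcpy(bigger, *buf, current);
                delete [] *buf;
                *buf = bigger;
                total_size *= 2;
            }
            std::memcpy(*buf + current, &headers[i], sizeof(RecordHeader));
            std::memcpy(*buf + current + sizeof(RecordHeader),
                        blocks[i].data(), blocks[i].size());
            current += rec_size;
        }
        return current;
    }

A caller would then write the returned byte range out to the compressed checkpoint trace, as the recorder does.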
It goes through the recorded contents of the caches, + * and issues flush requests. Except for the first one, a flush request + * is issued only after the previous one has completed. This currently + * requires use of MOESI Hammer protocol since only that protocol + * supports flush requests. + */ + void enqueueNextFlushRequest(); + + /*! + * Function for fetching warming up the memory and the caches. It goes + * through the recorded contents of the caches, as available in the + * checkpoint and issues fetch requests. Except for the first one, a + * fetch request is issued only after the previous one has completed. + * It should be possible to use this with any protocol. + */ + void enqueueNextFetchRequest(); + + private: + // Private copy constructor and assignment operator + CacheRecorder(const CacheRecorder& obj); + CacheRecorder& operator=(const CacheRecorder& obj); + + std::vector m_records; + uint8_t* m_uncompressed_trace; + uint64_t m_uncompressed_trace_size; + std::vector m_seq_map; + uint64_t m_bytes_read; + uint64_t m_records_read; + uint64_t m_records_flushed; + uint64_t m_block_size_bytes; +}; + +inline bool +compareTraceRecords(const TraceRecord* n1, const TraceRecord* n2) +{ + return n1->m_time > n2->m_time; +} + +inline std::ostream& +operator<<(std::ostream& out, const TraceRecord& obj) +{ + obj.print(out); + out << std::flush; + return out; +} + +#endif // __MEM_RUBY_RECORDER_CACHERECORDER_HH__ diff --git a/src/mem/ruby/system/DirectoryMemory.cc b/src/mem/ruby/system/DirectoryMemory.cc deleted file mode 100644 index cb1bf6f90..000000000 --- a/src/mem/ruby/system/DirectoryMemory.cc +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
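The DirectoryMemory code relocated by this patch (continuing below) interleaves physical memory across directory controllers using a field of high-order NUMA bits: those bits select the owning directory, and removing them and shifting off the block offset yields the local entry index. A standalone sketch of that address split, assuming power-of-two counts and numa_high_bit below 63; DirMap and its members are illustrative names.

    #include <cstdint>

    // Split a physical line address into (directory id, local entry index).
    struct DirMap
    {
        unsigned dir_bits;      // log2(number of directories)
        unsigned numa_high_bit; // highest bit of the interleaving field
        unsigned block_bits;    // log2(cache line size)

        uint64_t directoryOf(uint64_t addr) const
        {
            if (dir_bits == 0)
                return 0;
            unsigned low = numa_high_bit - dir_bits + 1;
            return (addr >> low) & ((1ull << dir_bits) - 1);
        }

        uint64_t localIndexOf(uint64_t addr) const
        {
            // Remove the directory-select bits, keep the bits above and
            // below them, then drop the block offset.
            unsigned low = numa_high_bit - dir_bits + 1;
            uint64_t low_part  = addr & ((1ull << low) - 1);
            uint64_t high_part = (addr >> (numa_high_bit + 1)) << low;
            return (high_part | low_part) >> block_bits;
        }
    };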
- */ - -#include "base/intmath.hh" -#include "debug/RubyCache.hh" -#include "debug/RubyStats.hh" -#include "mem/ruby/slicc_interface/RubySlicc_Util.hh" -#include "mem/ruby/system/DirectoryMemory.hh" -#include "mem/ruby/system/System.hh" - -using namespace std; - -int DirectoryMemory::m_num_directories = 0; -int DirectoryMemory::m_num_directories_bits = 0; -uint64_t DirectoryMemory::m_total_size_bytes = 0; -int DirectoryMemory::m_numa_high_bit = 0; - -DirectoryMemory::DirectoryMemory(const Params *p) - : SimObject(p) -{ - m_version = p->version; - m_size_bytes = p->size; - m_size_bits = floorLog2(m_size_bytes); - m_num_entries = 0; - m_use_map = p->use_map; - m_map_levels = p->map_levels; - m_numa_high_bit = p->numa_high_bit; -} - -void -DirectoryMemory::init() -{ - m_num_entries = m_size_bytes / RubySystem::getBlockSizeBytes(); - - if (m_use_map) { - m_sparseMemory = new SparseMemory(m_map_levels); - g_system_ptr->registerSparseMemory(m_sparseMemory); - } else { - m_entries = new AbstractEntry*[m_num_entries]; - for (int i = 0; i < m_num_entries; i++) - m_entries[i] = NULL; - m_ram = g_system_ptr->getMemoryVector(); - } - - m_num_directories++; - m_num_directories_bits = ceilLog2(m_num_directories); - m_total_size_bytes += m_size_bytes; - - if (m_numa_high_bit == 0) { - m_numa_high_bit = RubySystem::getMemorySizeBits() - 1; - } - assert(m_numa_high_bit != 0); -} - -DirectoryMemory::~DirectoryMemory() -{ - // free up all the directory entries - if (m_entries != NULL) { - for (uint64 i = 0; i < m_num_entries; i++) { - if (m_entries[i] != NULL) { - delete m_entries[i]; - } - } - delete [] m_entries; - } else if (m_use_map) { - delete m_sparseMemory; - } -} - -uint64 -DirectoryMemory::mapAddressToDirectoryVersion(PhysAddress address) -{ - if (m_num_directories_bits == 0) - return 0; - - uint64 ret = address.bitSelect(m_numa_high_bit - m_num_directories_bits + 1, - m_numa_high_bit); - return ret; -} - -bool -DirectoryMemory::isPresent(PhysAddress address) -{ - bool ret = (mapAddressToDirectoryVersion(address) == m_version); - return ret; -} - -uint64 -DirectoryMemory::mapAddressToLocalIdx(PhysAddress address) -{ - uint64 ret; - if (m_num_directories_bits > 0) { - ret = address.bitRemove(m_numa_high_bit - m_num_directories_bits + 1, - m_numa_high_bit); - } else { - ret = address.getAddress(); - } - - return ret >> (RubySystem::getBlockSizeBits()); -} - -AbstractEntry* -DirectoryMemory::lookup(PhysAddress address) -{ - assert(isPresent(address)); - DPRINTF(RubyCache, "Looking up address: %s\n", address); - - if (m_use_map) { - return m_sparseMemory->lookup(address); - } else { - uint64_t idx = mapAddressToLocalIdx(address); - assert(idx < m_num_entries); - return m_entries[idx]; - } -} - -AbstractEntry* -DirectoryMemory::allocate(const PhysAddress& address, AbstractEntry* entry) -{ - assert(isPresent(address)); - uint64 idx; - DPRINTF(RubyCache, "Looking up address: %s\n", address); - - if (m_use_map) { - m_sparseMemory->add(address, entry); - entry->changePermission(AccessPermission_Read_Write); - } else { - idx = mapAddressToLocalIdx(address); - assert(idx < m_num_entries); - entry->getDataBlk().assign(m_ram->getBlockPtr(address)); - entry->changePermission(AccessPermission_Read_Only); - m_entries[idx] = entry; - } - - return entry; -} - -void -DirectoryMemory::invalidateBlock(PhysAddress address) -{ - if (m_use_map) { - assert(m_sparseMemory->exist(address)); - m_sparseMemory->remove(address); - } -#if 0 - else { - assert(isPresent(address)); - - Index index = address.memoryModuleIndex(); - - 
if (index < 0 || index > m_size) { - ERROR_MSG("Directory Memory Assertion: " - "accessing memory out of range."); - } - - if (m_entries[index] != NULL){ - delete m_entries[index]; - m_entries[index] = NULL; - } - } -#endif -} - -void -DirectoryMemory::print(ostream& out) const -{ -} - -void -DirectoryMemory::regStats() -{ - if (m_use_map) { - m_sparseMemory->regStats(name()); - } -} - -void -DirectoryMemory::recordRequestType(DirectoryRequestType requestType) { - DPRINTF(RubyStats, "Recorded statistic: %s\n", - DirectoryRequestType_to_string(requestType)); -} - -DirectoryMemory * -RubyDirectoryMemoryParams::create() -{ - return new DirectoryMemory(this); -} diff --git a/src/mem/ruby/system/DirectoryMemory.hh b/src/mem/ruby/system/DirectoryMemory.hh deleted file mode 100644 index 8aa89ce12..000000000 --- a/src/mem/ruby/system/DirectoryMemory.hh +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
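
DirectoryMemory above interleaves lines across directories on high-order address bits: mapAddressToDirectoryVersion() selects the bits just below numa_high_bit to pick the owning directory, and mapAddressToLocalIdx() removes those same bits and then drops the block offset to get the index within that directory. A standalone sketch of the bit arithmetic, with illustrative parameters (64-byte lines, four directories, bit 31 as the NUMA high bit) instead of the real Address API:

    #include <cassert>
    #include <cstdint>

    // Extract bits [lo, hi] of addr, right-justified.
    static uint64_t bitSelect(uint64_t addr, int lo, int hi)
    {
        return (addr >> lo) & ((1ULL << (hi - lo + 1)) - 1);
    }

    // Remove bits [lo, hi] of addr and splice the remaining halves together.
    static uint64_t bitRemove(uint64_t addr, int lo, int hi)
    {
        uint64_t low  = addr & ((1ULL << lo) - 1);
        uint64_t high = (addr >> (hi + 1)) << lo;
        return high | low;
    }

    int main()
    {
        const int block_bits    = 6;   // 64-byte lines (assumption)
        const int num_dir_bits  = 2;   // four directories (assumption)
        const int numa_high_bit = 31;  // interleaving bits sit just below this

        uint64_t addr = 0x92345680;
        // Which directory owns the line: the selected high-order bits.
        uint64_t version = bitSelect(addr, numa_high_bit - num_dir_bits + 1,
                                     numa_high_bit);
        // Index within that directory: drop the directory bits, then the
        // block offset, mirroring mapAddressToLocalIdx() above.
        uint64_t idx = bitRemove(addr, numa_high_bit - num_dir_bits + 1,
                                 numa_high_bit) >> block_bits;
        assert(version < 4);
        (void)idx;
        return 0;
    }
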
- */ - -#ifndef __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__ -#define __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__ - -#include -#include - -#include "mem/ruby/common/Address.hh" -#include "mem/protocol/DirectoryRequestType.hh" -#include "mem/ruby/slicc_interface/AbstractEntry.hh" -#include "mem/ruby/system/MemoryVector.hh" -#include "mem/ruby/system/SparseMemory.hh" -#include "params/RubyDirectoryMemory.hh" -#include "sim/sim_object.hh" - -class DirectoryMemory : public SimObject -{ - public: - typedef RubyDirectoryMemoryParams Params; - DirectoryMemory(const Params *p); - ~DirectoryMemory(); - - void init(); - - uint64 mapAddressToLocalIdx(PhysAddress address); - static uint64 mapAddressToDirectoryVersion(PhysAddress address); - - bool isSparseImplementation() { return m_use_map; } - uint64 getSize() { return m_size_bytes; } - - bool isPresent(PhysAddress address); - AbstractEntry* lookup(PhysAddress address); - AbstractEntry* allocate(const PhysAddress& address, - AbstractEntry* new_entry); - - void invalidateBlock(PhysAddress address); - - void print(std::ostream& out) const; - void regStats(); - - void recordRequestType(DirectoryRequestType requestType); - - private: - // Private copy constructor and assignment operator - DirectoryMemory(const DirectoryMemory& obj); - DirectoryMemory& operator=(const DirectoryMemory& obj); - - private: - const std::string m_name; - AbstractEntry **m_entries; - // int m_size; // # of memory module blocks this directory is - // responsible for - uint64 m_size_bytes; - uint64 m_size_bits; - uint64 m_num_entries; - int m_version; - - static int m_num_directories; - static int m_num_directories_bits; - static uint64_t m_total_size_bytes; - static int m_numa_high_bit; - - MemoryVector* m_ram; - SparseMemory* m_sparseMemory; - bool m_use_map; - int m_map_levels; -}; - -inline std::ostream& -operator<<(std::ostream& out, const DirectoryMemory& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -#endif // __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__ diff --git a/src/mem/ruby/system/DirectoryMemory.py b/src/mem/ruby/system/DirectoryMemory.py deleted file mode 100644 index ac4dd5934..000000000 --- a/src/mem/ruby/system/DirectoryMemory.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2009 Advanced Micro Devices, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# Authors: Steve Reinhardt -# Brad Beckmann - -from m5.params import * -from m5.proxy import * -from m5.SimObject import SimObject - -class RubyDirectoryMemory(SimObject): - type = 'RubyDirectoryMemory' - cxx_class = 'DirectoryMemory' - cxx_header = "mem/ruby/system/DirectoryMemory.hh" - version = Param.Int(0, "") - size = Param.MemorySize("1GB", "capacity in bytes") - use_map = Param.Bool(False, "enable sparse memory") - map_levels = Param.Int(4, "sparse memory map levels") - # the default value of the numa high bit is specified in the command line - # option and must be passed into the directory memory sim object - numa_high_bit = Param.Int("numa high bit") diff --git a/src/mem/ruby/system/LRUPolicy.hh b/src/mem/ruby/system/LRUPolicy.hh deleted file mode 100644 index 622e28659..000000000 --- a/src/mem/ruby/system/LRUPolicy.hh +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2007 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __MEM_RUBY_SYSTEM_LRUPOLICY_HH__ -#define __MEM_RUBY_SYSTEM_LRUPOLICY_HH__ - -#include "mem/ruby/system/AbstractReplacementPolicy.hh" - -/* Simple true LRU replacement policy */ - -class LRUPolicy : public AbstractReplacementPolicy -{ - public: - LRUPolicy(Index num_sets, Index assoc); - ~LRUPolicy(); - - void touch(Index set, Index way, Tick time); - Index getVictim(Index set) const; -}; - -inline -LRUPolicy::LRUPolicy(Index num_sets, Index assoc) - : AbstractReplacementPolicy(num_sets, assoc) -{ -} - -inline -LRUPolicy::~LRUPolicy() -{ -} - -inline void -LRUPolicy::touch(Index set, Index index, Tick time) -{ - assert(index >= 0 && index < m_assoc); - assert(set >= 0 && set < m_num_sets); - - m_last_ref_ptr[set][index] = time; -} - -inline Index -LRUPolicy::getVictim(Index set) const -{ - // assert(m_assoc != 0); - Tick time, smallest_time; - Index smallest_index; - - smallest_index = 0; - smallest_time = m_last_ref_ptr[set][0]; - - for (unsigned i = 0; i < m_assoc; i++) { - time = m_last_ref_ptr[set][i]; - // assert(m_cache[cacheSet][i].m_Permission != - // AccessPermission_NotPresent); - - if (time < smallest_time) { - smallest_index = i; - smallest_time = time; - } - } - - // DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet); - // DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index); - // DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]); - // DEBUG_EXPR(CACHE_COMP, MedPrio, *this); - - return smallest_index; -} - -#endif // __MEM_RUBY_SYSTEM_LRUPOLICY_HH__ diff --git a/src/mem/ruby/system/MachineID.hh b/src/mem/ruby/system/MachineID.hh deleted file mode 100644 index 0ad898959..000000000 --- a/src/mem/ruby/system/MachineID.hh +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_MACHINEID_HH__ -#define __MEM_RUBY_SYSTEM_MACHINEID_HH__ - -#include -#include - -#include "base/cprintf.hh" -#include "mem/protocol/MachineType.hh" - -struct MachineID -{ - MachineType type; - //! range: 0 ... 
number of this machine's components in system - 1 - NodeID num; - - MachineType getType() const { return type; } - NodeID getNum() const { return num; } -}; - -inline std::string -MachineIDToString(MachineID machine) -{ - return csprintf("%s_%d", MachineType_to_string(machine.type), machine.num); -} - -inline bool -operator==(const MachineID & obj1, const MachineID & obj2) -{ - return (obj1.type == obj2.type && obj1.num == obj2.num); -} - -inline bool -operator!=(const MachineID & obj1, const MachineID & obj2) -{ - return (obj1.type != obj2.type || obj1.num != obj2.num); -} - -// Output operator declaration -std::ostream& operator<<(std::ostream& out, const MachineID& obj); - -inline std::ostream& -operator<<(std::ostream& out, const MachineID& obj) -{ - if ((obj.type < MachineType_NUM) && (obj.type >= MachineType_FIRST)) { - out << MachineType_to_string(obj.type); - } else { - out << "NULL"; - } - out << "-"; - out << obj.num; - out << std::flush; - return out; -} - -#endif // __MEM_RUBY_SYSTEM_MACHINEID_HH__ diff --git a/src/mem/ruby/system/MemoryControl.cc b/src/mem/ruby/system/MemoryControl.cc deleted file mode 100644 index e58b36f63..000000000 --- a/src/mem/ruby/system/MemoryControl.cc +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * Copyright (c) 2012 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "debug/RubyStats.hh" -#include "mem/ruby/common/Global.hh" -#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh" -#include "mem/ruby/system/MemoryControl.hh" -#include "mem/ruby/system/System.hh" - -using namespace std; -MemoryControl::MemoryControl(const Params *p) - : ClockedObject(p), Consumer(this), m_event(this) -{ - g_system_ptr->registerMemController(this); -} - -MemoryControl::~MemoryControl() {}; - -void -MemoryControl::recordRequestType(MemoryControlRequestType request) { - DPRINTF(RubyStats, "Recorded request: %s\n", - MemoryControlRequestType_to_string(request)); -} diff --git a/src/mem/ruby/system/MemoryControl.hh b/src/mem/ruby/system/MemoryControl.hh deleted file mode 100644 index 35eb057f5..000000000 --- a/src/mem/ruby/system/MemoryControl.hh +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * Copyright (c) 2012 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__ -#define __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__ - -#include -#include -#include - -#include "mem/protocol/MemoryControlRequestType.hh" -#include "mem/ruby/common/Consumer.hh" -#include "mem/ruby/slicc_interface/Message.hh" -#include "mem/ruby/system/MemoryNode.hh" -#include "params/MemoryControl.hh" -#include "sim/clocked_object.hh" - -////////////////////////////////////////////////////////////////////////////// - -class MemoryControl : public ClockedObject, public Consumer -{ - public: - typedef MemoryControlParams Params; - const Params *params() const - { return dynamic_cast(_params); } - - MemoryControl(const Params *p); - virtual void init() = 0; - virtual void reset() = 0; - - ~MemoryControl(); - - virtual void wakeup() = 0; - - virtual void setConsumer(Consumer* consumer_ptr) = 0; - virtual Consumer* getConsumer() = 0; - virtual void setClockObj(ClockedObject* consumer_ptr) {} - - virtual void setDescription(const std::string& name) = 0; - virtual std::string getDescription() = 0; - - // Called from the directory: - virtual void enqueue(const MsgPtr& message, Cycles latency) = 0; - virtual void enqueueMemRef(MemoryNode *memRef) = 0; - virtual void dequeue() = 0; - virtual const Message* peek() = 0; - virtual MemoryNode *peekNode() = 0; - virtual bool isReady() = 0; - virtual bool areNSlotsAvailable(int n) = 0; // infinite queue length - - virtual void print(std::ostream& out) const = 0; - virtual void regStats() {}; - - virtual const int getChannel(const physical_address_t addr) const = 0; - virtual const int getBank(const physical_address_t addr) const = 0; - virtual const int getRank(const physical_address_t addr) const = 0; - virtual const int getRow(const physical_address_t addr) const = 0; - - //added by SS - virtual int getBanksPerRank() = 0; - virtual int getRanksPerDimm() = 0; - virtual int getDimmsPerChannel() = 0; - - virtual void recordRequestType(MemoryControlRequestType requestType); - - virtual bool functionalReadBuffers(Packet *pkt) - { fatal("Functional read access not implemented!");} - virtual uint32_t functionalWriteBuffers(Packet *pkt) - { fatal("Functional read access not implemented!");} - -protected: - class MemCntrlEvent : public Event - { - public: - MemCntrlEvent(MemoryControl* _mem_cntrl) - { - mem_cntrl = _mem_cntrl; - } - private: - void process() { mem_cntrl->wakeup(); } - - MemoryControl* mem_cntrl; - }; - - MemCntrlEvent m_event; -}; - -#endif // __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__ diff --git a/src/mem/ruby/system/MemoryControl.py b/src/mem/ruby/system/MemoryControl.py deleted file mode 100644 index ad18efec5..000000000 --- a/src/mem/ruby/system/MemoryControl.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2009 Advanced Micro Devices, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
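
MemoryControl above is purely an interface; concrete controllers (such as RubyMemoryControl further down) implement the queueing. The calling pattern the directory depends on is small: enqueue a request, then drain completed work through isReady()/peek()/dequeue() when the controller wakes up. The toy class below mirrors that pattern only; it is not the gem5 class and ignores latencies, banks, and the Consumer scheduling machinery:

    #include <cstdio>
    #include <queue>

    struct ToyMemRequest { unsigned long addr; bool is_read; };

    class ToyMemoryControl {
      public:
        void enqueue(const ToyMemRequest &req) { m_queue.push(req); }
        bool isReady() const { return !m_queue.empty(); }
        const ToyMemRequest &peek() const { return m_queue.front(); }
        void dequeue() { m_queue.pop(); }

        // A real controller is scheduled as a Consumer; here wakeup() simply
        // drains everything that is ready.
        void wakeup() {
            while (isReady()) {
                const ToyMemRequest &req = peek();
                std::printf("servicing %s of 0x%lx\n",
                            req.is_read ? "read" : "write", req.addr);
                dequeue();
            }
        }
      private:
        std::queue<ToyMemRequest> m_queue;
    };

    int main() {
        ToyMemoryControl mc;
        mc.enqueue({0x1000, true});
        mc.enqueue({0x2040, false});
        mc.wakeup();
        return 0;
    }
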
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# Authors: Steve Reinhardt -# Brad Beckmann - -from m5.params import * -from ClockedObject import ClockedObject - -class MemoryControl(ClockedObject): - abstract = True - type = 'MemoryControl' - cxx_class = 'MemoryControl' - cxx_header = "mem/ruby/system/MemoryControl.hh" - version = Param.Int(""); - ruby_system = Param.RubySystem("") diff --git a/src/mem/ruby/system/MemoryNode.cc b/src/mem/ruby/system/MemoryNode.cc deleted file mode 100644 index 07262fba0..000000000 --- a/src/mem/ruby/system/MemoryNode.cc +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 1999 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "mem/ruby/system/MemoryNode.hh" - -using namespace std; - -void -MemoryNode::print(ostream& out) const -{ - out << "["; - out << m_time << ", "; - out << m_msg_counter << ", "; - out << m_msgptr << "; "; - out << "]"; -} diff --git a/src/mem/ruby/system/MemoryNode.hh b/src/mem/ruby/system/MemoryNode.hh deleted file mode 100644 index f215ab649..000000000 --- a/src/mem/ruby/system/MemoryNode.hh +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2008 Mark D. Hill and David A. Wood - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Description: - * This structure records everything known about a single - * memory request that is queued in the memory controller. - * It is created when the memory request first arrives - * at a memory controller and is deleted when the underlying - * message is enqueued to be sent back to the directory. - */ - -#ifndef __MEM_RUBY_SYSTEM_MEMORYNODE_HH__ -#define __MEM_RUBY_SYSTEM_MEMORYNODE_HH__ - -#include - -#include "mem/ruby/common/TypeDefines.hh" -#include "mem/ruby/slicc_interface/Message.hh" - -class MemoryNode -{ - public: - // old constructor - MemoryNode(const Cycles& time, int counter, const MsgPtr& msgptr, - const physical_address_t addr, const bool is_mem_read) - : m_time(time) - { - m_msg_counter = counter; - m_msgptr = msgptr; - m_addr = addr; - m_is_mem_read = is_mem_read; - m_is_dirty_wb = !is_mem_read; - } - - // new constructor - MemoryNode(const Cycles& time, const MsgPtr& msgptr, - const physical_address_t addr, const bool is_mem_read, - const bool is_dirty_wb) - : m_time(time) - { - m_msg_counter = 0; - m_msgptr = msgptr; - m_addr = addr; - m_is_mem_read = is_mem_read; - m_is_dirty_wb = is_dirty_wb; - } - - void print(std::ostream& out) const; - - Cycles m_time; - int m_msg_counter; - MsgPtr m_msgptr; - physical_address_t m_addr; - bool m_is_mem_read; - bool m_is_dirty_wb; -}; - -inline std::ostream& -operator<<(std::ostream& out, const MemoryNode& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -#endif // __MEM_RUBY_SYSTEM_MEMORYNODE_HH__ diff --git a/src/mem/ruby/system/MemoryVector.hh b/src/mem/ruby/system/MemoryVector.hh deleted file mode 100644 index f2488b591..000000000 --- a/src/mem/ruby/system/MemoryVector.hh +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (c) 2009 Mark D. Hill and David A. Wood - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__ -#define __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__ - -#include "base/trace.hh" -#include "debug/RubyCacheTrace.hh" -#include "mem/ruby/common/Address.hh" - -class DirectoryMemory; - -/** - * MemoryVector holds memory data (DRAM only) - */ -class MemoryVector -{ - public: - MemoryVector(); - MemoryVector(uint64 size); - ~MemoryVector(); - friend class DirectoryMemory; - - void resize(uint64 size); // destructive - - void write(const Address & paddr, uint8_t *data, int len); - uint8_t *read(const Address & paddr, uint8_t *data, int len); - uint32_t collatePages(uint8_t *&raw_data); - void populatePages(uint8_t *raw_data); - - private: - uint8_t *getBlockPtr(const PhysAddress & addr); - - uint64 m_size; - uint8_t **m_pages; - uint32_t m_num_pages; - const uint32_t m_page_offset_mask; - static const uint32_t PAGE_SIZE = 4096; -}; - -inline -MemoryVector::MemoryVector() - : m_page_offset_mask(4095) -{ - m_size = 0; - m_num_pages = 0; - m_pages = NULL; -} - -inline -MemoryVector::MemoryVector(uint64 size) - : m_page_offset_mask(4095) -{ - resize(size); -} - -inline -MemoryVector::~MemoryVector() -{ - for (int i = 0; i < m_num_pages; i++) { - if (m_pages[i] != 0) { - delete [] m_pages[i]; - } - } - delete [] m_pages; -} - -inline void -MemoryVector::resize(uint64 size) -{ - if (m_pages != NULL){ - for (int i = 0; i < m_num_pages; i++) { - if (m_pages[i] != 0) { - delete [] m_pages[i]; - } - } - delete [] m_pages; - } - m_size = size; - assert(size%PAGE_SIZE == 0); - m_num_pages = size >> 12; - m_pages = new uint8_t*[m_num_pages]; - memset(m_pages, 0, m_num_pages * sizeof(uint8_t*)); -} - -inline void -MemoryVector::write(const Address & paddr, uint8_t *data, int len) -{ - assert(paddr.getAddress() + len <= m_size); - uint32_t page_num = paddr.getAddress() >> 12; - if (m_pages[page_num] == 0) { - bool all_zeros = true; - for (int i = 0; i < len;i++) { - if (data[i] != 0) { - all_zeros = false; - break; - } - } - if (all_zeros) - return; - m_pages[page_num] = new 
uint8_t[PAGE_SIZE]; - memset(m_pages[page_num], 0, PAGE_SIZE); - uint32_t offset = paddr.getAddress() & m_page_offset_mask; - memcpy(&m_pages[page_num][offset], data, len); - } else { - memcpy(&m_pages[page_num][paddr.getAddress()&m_page_offset_mask], - data, len); - } -} - -inline uint8_t* -MemoryVector::read(const Address & paddr, uint8_t *data, int len) -{ - assert(paddr.getAddress() + len <= m_size); - uint32_t page_num = paddr.getAddress() >> 12; - if (m_pages[page_num] == 0) { - memset(data, 0, len); - } else { - memcpy(data, &m_pages[page_num][paddr.getAddress()&m_page_offset_mask], - len); - } - return data; -} - -inline uint8_t* -MemoryVector::getBlockPtr(const PhysAddress & paddr) -{ - uint32_t page_num = paddr.getAddress() >> 12; - if (m_pages[page_num] == 0) { - m_pages[page_num] = new uint8_t[PAGE_SIZE]; - memset(m_pages[page_num], 0, PAGE_SIZE); - } - return &m_pages[page_num][paddr.getAddress()&m_page_offset_mask]; -} - -/*! - * Function for collating all the pages of the physical memory together. - * In case a pointer for a page is NULL, this page needs only a single byte - * to represent that the pointer is NULL. Otherwise, it needs 1 + PAGE_SIZE - * bytes. The first represents that the page pointer is not NULL, and rest of - * the bytes represent the data on the page. - */ - -inline uint32_t -MemoryVector::collatePages(uint8_t *&raw_data) -{ - uint32_t num_zero_pages = 0; - uint32_t data_size = 0; - - for (uint32_t i = 0;i < m_num_pages; ++i) - { - if (m_pages[i] == 0) num_zero_pages++; - } - - raw_data = new uint8_t[sizeof(uint32_t) /* number of pages*/ + - m_num_pages /* whether the page is all zeros */ + - PAGE_SIZE * (m_num_pages - num_zero_pages)]; - - /* Write the number of pages to be stored. */ - memcpy(raw_data, &m_num_pages, sizeof(uint32_t)); - data_size = sizeof(uint32_t); - - DPRINTF(RubyCacheTrace, "collating %d pages\n", m_num_pages); - - for (uint32_t i = 0;i < m_num_pages; ++i) - { - if (m_pages[i] == 0) { - raw_data[data_size] = 0; - } else { - raw_data[data_size] = 1; - memcpy(raw_data + data_size + 1, m_pages[i], PAGE_SIZE); - data_size += PAGE_SIZE; - } - data_size += 1; - } - - return data_size; -} - -/*! - * Function for populating the pages of the memory using the available raw - * data. Each page has a byte associate with it, which represents whether the - * page was NULL or not, when all the pages were collated. The function assumes - * that the number of pages in the memory are same as those that were recorded - * in the checkpoint. - */ -inline void -MemoryVector::populatePages(uint8_t *raw_data) -{ - uint32_t data_size = 0; - uint32_t num_pages = 0; - - /* Read the number of pages that were stored. */ - memcpy(&num_pages, raw_data, sizeof(uint32_t)); - data_size = sizeof(uint32_t); - assert(num_pages == m_num_pages); - - DPRINTF(RubyCacheTrace, "Populating %d pages\n", num_pages); - - for (uint32_t i = 0;i < m_num_pages; ++i) - { - assert(m_pages[i] == 0); - if (raw_data[data_size] != 0) { - m_pages[i] = new uint8_t[PAGE_SIZE]; - memcpy(m_pages[i], raw_data + data_size + 1, PAGE_SIZE); - data_size += PAGE_SIZE; - } - data_size += 1; - } -} - -#endif // __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__ diff --git a/src/mem/ruby/system/PerfectCacheMemory.hh b/src/mem/ruby/system/PerfectCacheMemory.hh deleted file mode 100644 index b56543c41..000000000 --- a/src/mem/ruby/system/PerfectCacheMemory.hh +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. 
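
The collatePages()/populatePages() comments above define a compact checkpoint layout: a uint32_t page count, then one flag byte per page, with PAGE_SIZE data bytes following the flag only when the page was actually allocated. Below is a standalone sketch of the serialization half of that layout; the std::vector-based page store is purely illustrative:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    static const uint32_t PAGE_SIZE = 4096;

    // Serialize pages as: page count, then per page a flag byte
    // (0 = unallocated / all zeros) followed by the page data when flag == 1.
    std::vector<uint8_t> collate(const std::vector<std::vector<uint8_t>> &pages)
    {
        std::vector<uint8_t> out;
        uint32_t n = pages.size();
        out.insert(out.end(), reinterpret_cast<uint8_t *>(&n),
                   reinterpret_cast<uint8_t *>(&n) + sizeof(n));
        for (const auto &p : pages) {
            if (p.empty()) {                     // unallocated page: flag only
                out.push_back(0);
            } else {                             // allocated page: flag + data
                out.push_back(1);
                out.insert(out.end(), p.begin(), p.end());
            }
        }
        return out;
    }

    int main()
    {
        std::vector<std::vector<uint8_t>> pages(3);
        pages[1].assign(PAGE_SIZE, 0xab);        // only page 1 holds data
        std::vector<uint8_t> raw = collate(pages);
        // 4-byte count + 3 flag bytes + one full page of data.
        assert(raw.size() == sizeof(uint32_t) + 3 + PAGE_SIZE);
        return 0;
    }
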
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__ -#define __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__ - -#include "base/hashmap.hh" -#include "mem/protocol/AccessPermission.hh" -#include "mem/ruby/common/Address.hh" - -template -struct PerfectCacheLineState -{ - PerfectCacheLineState() { m_permission = AccessPermission_NUM; } - AccessPermission m_permission; - ENTRY m_entry; -}; - -template -inline std::ostream& -operator<<(std::ostream& out, const PerfectCacheLineState& obj) -{ - return out; -} - -template -class PerfectCacheMemory -{ - public: - PerfectCacheMemory(); - - // tests to see if an address is present in the cache - bool isTagPresent(const Address& address) const; - - // Returns true if there is: - // a) a tag match on this address or there is - // b) an Invalid line in the same cache "way" - bool cacheAvail(const Address& address) const; - - // find an Invalid entry and sets the tag appropriate for the address - void allocate(const Address& address); - - void deallocate(const Address& address); - - // Returns with the physical address of the conflicting cache line - Address cacheProbe(const Address& newAddress) const; - - // looks an address up in the cache - ENTRY& lookup(const Address& address); - const ENTRY& lookup(const Address& address) const; - - // Get/Set permission of cache block - AccessPermission getPermission(const Address& address) const; - void changePermission(const Address& address, AccessPermission new_perm); - - // Print cache contents - void print(std::ostream& out) const; - - private: - // Private copy constructor and assignment operator - PerfectCacheMemory(const PerfectCacheMemory& obj); - PerfectCacheMemory& operator=(const PerfectCacheMemory& obj); - - // Data Members (m_prefix) - m5::hash_map > m_map; -}; - -template -inline std::ostream& -operator<<(std::ostream& out, const PerfectCacheMemory& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -template -inline -PerfectCacheMemory::PerfectCacheMemory() -{ -} - -// tests to see if an address is present 
in the cache -template -inline bool -PerfectCacheMemory::isTagPresent(const Address& address) const -{ - return m_map.count(line_address(address)) > 0; -} - -template -inline bool -PerfectCacheMemory::cacheAvail(const Address& address) const -{ - return true; -} - -// find an Invalid or already allocated entry and sets the tag -// appropriate for the address -template -inline void -PerfectCacheMemory::allocate(const Address& address) -{ - PerfectCacheLineState line_state; - line_state.m_permission = AccessPermission_Invalid; - line_state.m_entry = ENTRY(); - m_map[line_address(address)] = line_state; -} - -// deallocate entry -template -inline void -PerfectCacheMemory::deallocate(const Address& address) -{ - m_map.erase(line_address(address)); -} - -// Returns with the physical address of the conflicting cache line -template -inline Address -PerfectCacheMemory::cacheProbe(const Address& newAddress) const -{ - panic("cacheProbe called in perfect cache"); - return newAddress; -} - -// looks an address up in the cache -template -inline ENTRY& -PerfectCacheMemory::lookup(const Address& address) -{ - return m_map[line_address(address)].m_entry; -} - -// looks an address up in the cache -template -inline const ENTRY& -PerfectCacheMemory::lookup(const Address& address) const -{ - return m_map[line_address(address)].m_entry; -} - -template -inline AccessPermission -PerfectCacheMemory::getPermission(const Address& address) const -{ - return m_map[line_address(address)].m_permission; -} - -template -inline void -PerfectCacheMemory::changePermission(const Address& address, - AccessPermission new_perm) -{ - Address line_address = address; - line_address.makeLineAddress(); - PerfectCacheLineState& line_state = m_map[line_address]; - line_state.m_permission = new_perm; -} - -template -inline void -PerfectCacheMemory::print(std::ostream& out) const -{ -} - -#endif // __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__ diff --git a/src/mem/ruby/system/PersistentTable.cc b/src/mem/ruby/system/PersistentTable.cc deleted file mode 100644 index c60d39b8a..000000000 --- a/src/mem/ruby/system/PersistentTable.cc +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
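
PerfectCacheMemory above is a cache in interface only: it is backed by a hash map keyed on the line address, so every line fits, cacheAvail() always returns true, and nothing is ever evicted (cacheProbe() panics if called). A minimal stand-in expressing the same idea with std::unordered_map and an assumed 64-byte line size:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    struct Line { int state = 0; };

    class PerfectCache {
      public:
        bool isTagPresent(uint64_t addr) const { return m_map.count(line(addr)) != 0; }
        void allocate(uint64_t addr)           { m_map[line(addr)] = Line(); }
        Line &lookup(uint64_t addr)            { return m_map[line(addr)]; }
      private:
        // Mask off the block offset so all addresses in a line share one key.
        static uint64_t line(uint64_t addr)    { return addr & ~uint64_t(63); }
        std::unordered_map<uint64_t, Line> m_map;
    };

    int main() {
        PerfectCache c;
        assert(!c.isTagPresent(0x1008));
        c.allocate(0x1008);                 // maps to line 0x1000
        assert(c.isTagPresent(0x1030));     // same 64-byte line
        c.lookup(0x1030).state = 1;
        return 0;
    }
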
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "mem/ruby/system/PersistentTable.hh" - -using namespace std; - -// randomize so that handoffs are not locality-aware -#if 0 -int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, - 10, 14, 3, 7, 11, 15}; -int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15}; -#endif - -PersistentTable::PersistentTable() -{ -} - -PersistentTable::~PersistentTable() -{ -} - -void -PersistentTable::persistentRequestLock(const Address& address, - MachineID locker, - AccessType type) -{ -#if 0 - if (locker == m_chip_ptr->getID()) - cout << "Chip " << m_chip_ptr->getID() << ": " << llocker - << " requesting lock for " << address << endl; - - MachineID locker = (MachineID) persistent_randomize[llocker]; -#endif - - assert(address == line_address(address)); - - static const PersistentTableEntry dflt; - pair r = - m_map.insert(AddressMap::value_type(address, dflt)); - bool present = !r.second; - AddressMap::iterator i = r.first; - PersistentTableEntry &entry = i->second; - - if (present) { - // Make sure we're not already in the locked set - assert(!(entry.m_starving.isElement(locker))); - } - - entry.m_starving.add(locker); - if (type == AccessType_Write) - entry.m_request_to_write.add(locker); - - if (present) - assert(entry.m_marked.isSubset(entry.m_starving)); -} - -void -PersistentTable::persistentRequestUnlock(const Address& address, - MachineID unlocker) -{ -#if 0 - if (unlocker == m_chip_ptr->getID()) - cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker - << " requesting unlock for " << address << endl; - - MachineID unlocker = (MachineID) persistent_randomize[uunlocker]; -#endif - - assert(address == line_address(address)); - assert(m_map.count(address)); - PersistentTableEntry& entry = m_map[address]; - - // - // Make sure we're in the locked set - // - assert(entry.m_starving.isElement(unlocker)); - assert(entry.m_marked.isSubset(entry.m_starving)); - entry.m_starving.remove(unlocker); - entry.m_marked.remove(unlocker); - entry.m_request_to_write.remove(unlocker); - assert(entry.m_marked.isSubset(entry.m_starving)); - - // Deallocate if empty - if (entry.m_starving.isEmpty()) { - assert(entry.m_marked.isEmpty()); - m_map.erase(address); - } -} - -bool -PersistentTable::okToIssueStarving(const Address& address, - MachineID machId) const -{ - assert(address == line_address(address)); - - AddressMap::const_iterator i = m_map.find(address); - if (i == m_map.end()) { - // No entry present - return true; - } - - const PersistentTableEntry &entry = i->second; - - if (entry.m_starving.isElement(machId)) { - // We can't issue another lockdown until are previous unlock - // has occurred - return false; - } - - return entry.m_marked.isEmpty(); -} - -MachineID -PersistentTable::findSmallest(const Address& address) const -{ - assert(address == line_address(address)); - AddressMap::const_iterator i = m_map.find(address); - assert(i != m_map.end()); - const PersistentTableEntry& entry = i->second; - return 
entry.m_starving.smallestElement(); -} - -AccessType -PersistentTable::typeOfSmallest(const Address& address) const -{ - assert(address == line_address(address)); - AddressMap::const_iterator i = m_map.find(address); - assert(i != m_map.end()); - const PersistentTableEntry& entry = i->second; - if (entry.m_request_to_write. - isElement(entry.m_starving.smallestElement())) { - return AccessType_Write; - } else { - return AccessType_Read; - } -} - -void -PersistentTable::markEntries(const Address& address) -{ - assert(address == line_address(address)); - AddressMap::iterator i = m_map.find(address); - if (i == m_map.end()) - return; - - PersistentTableEntry& entry = i->second; - - // None should be marked - assert(entry.m_marked.isEmpty()); - - // Mark all the nodes currently in the table - entry.m_marked = entry.m_starving; -} - -bool -PersistentTable::isLocked(const Address& address) const -{ - assert(address == line_address(address)); - - // If an entry is present, it must be locked - return m_map.count(address) > 0; -} - -int -PersistentTable::countStarvingForAddress(const Address& address) const -{ - assert(address == line_address(address)); - AddressMap::const_iterator i = m_map.find(address); - if (i == m_map.end()) - return 0; - - const PersistentTableEntry& entry = i->second; - return entry.m_starving.count(); -} - -int -PersistentTable::countReadStarvingForAddress(const Address& address) const -{ - assert(address == line_address(address)); - AddressMap::const_iterator i = m_map.find(address); - if (i == m_map.end()) - return 0; - - const PersistentTableEntry& entry = i->second; - return entry.m_starving.count() - entry.m_request_to_write.count(); -} - -void -PersistentTable::print(ostream& out) const -{ -} - diff --git a/src/mem/ruby/system/PersistentTable.hh b/src/mem/ruby/system/PersistentTable.hh deleted file mode 100644 index f634c35d1..000000000 --- a/src/mem/ruby/system/PersistentTable.hh +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
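
PersistentTable above arbitrates persistent (starvation-avoidance) requests per line address: each requester registers itself, the smallest registered machine ID is the current winner for the handoff, and a requester must unlock before issuing another persistent request. The toy version below keeps only that core idea, using integer IDs and dropping the marked and request-to-write sets:

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <set>

    class ToyPersistentTable {
      public:
        void requestLock(uint64_t line, int who)   { m_map[line].insert(who); }
        void requestUnlock(uint64_t line, int who) {
            m_map[line].erase(who);
            if (m_map[line].empty()) m_map.erase(line);   // deallocate if empty
        }
        bool isLocked(uint64_t line) const { return m_map.count(line) > 0; }
        int  findSmallest(uint64_t line) const { return *m_map.at(line).begin(); }
      private:
        std::map<uint64_t, std::set<int>> m_map;
    };

    int main() {
        ToyPersistentTable t;
        t.requestLock(0x40, 3);
        t.requestLock(0x40, 1);
        assert(t.isLocked(0x40));
        assert(t.findSmallest(0x40) == 1);   // lowest ID wins the handoff
        t.requestUnlock(0x40, 1);
        assert(t.findSmallest(0x40) == 3);
        return 0;
    }
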
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__ -#define __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__ - -#include - -#include "base/hashmap.hh" -#include "mem/protocol/AccessType.hh" -#include "mem/ruby/common/Address.hh" -#include "mem/ruby/common/NetDest.hh" -#include "mem/ruby/system/MachineID.hh" - -class PersistentTableEntry -{ - public: - PersistentTableEntry() {} - void print(std::ostream& out) const {} - - NetDest m_starving; - NetDest m_marked; - NetDest m_request_to_write; -}; - -class PersistentTable -{ - public: - // Constructors - PersistentTable(); - - // Destructor - ~PersistentTable(); - - // Public Methods - void persistentRequestLock(const Address& address, MachineID locker, - AccessType type); - void persistentRequestUnlock(const Address& address, MachineID unlocker); - bool okToIssueStarving(const Address& address, MachineID machID) const; - MachineID findSmallest(const Address& address) const; - AccessType typeOfSmallest(const Address& address) const; - void markEntries(const Address& address); - bool isLocked(const Address& addr) const; - int countStarvingForAddress(const Address& addr) const; - int countReadStarvingForAddress(const Address& addr) const; - - void print(std::ostream& out) const; - - private: - // Private copy constructor and assignment operator - PersistentTable(const PersistentTable& obj); - PersistentTable& operator=(const PersistentTable& obj); - - // Data Members (m_prefix) - typedef m5::hash_map AddressMap; - AddressMap m_map; -}; - -inline std::ostream& -operator<<(std::ostream& out, const PersistentTable& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -inline std::ostream& -operator<<(std::ostream& out, const PersistentTableEntry& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -#endif // __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__ diff --git a/src/mem/ruby/system/PseudoLRUPolicy.hh b/src/mem/ruby/system/PseudoLRUPolicy.hh deleted file mode 100644 index 4b6ba0db6..000000000 --- a/src/mem/ruby/system/PseudoLRUPolicy.hh +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2007 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__ -#define __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__ - -#include "mem/ruby/system/AbstractReplacementPolicy.hh" - -/** - * Implementation of tree-based pseudo-LRU replacement - * - * Works for any associativity between 1 and 128. - * - * Also implements associativities that are not a power of 2 by - * ignoring paths that lead to a larger index (i.e. truncating the - * tree). Note that when this occurs, the algorithm becomes less - * fair, as it will favor indicies in the larger (by index) half of - * the associative set. This is most unfair when the nearest power of - * 2 is one below the associativy, and most fair when it is one above. - */ - -class PseudoLRUPolicy : public AbstractReplacementPolicy -{ - public: - PseudoLRUPolicy(Index num_sets, Index assoc); - ~PseudoLRUPolicy(); - - void touch(Index set, Index way, Tick time); - Index getVictim(Index set) const; - - private: - unsigned int m_effective_assoc; /** nearest (to ceiling) power of 2 */ - unsigned int m_num_levels; /** number of levels in the tree */ - uint64* m_trees; /** bit representation of the - * trees, one for each set */ -}; - -inline -PseudoLRUPolicy::PseudoLRUPolicy(Index num_sets, Index assoc) - : AbstractReplacementPolicy(num_sets, assoc) -{ - // associativity cannot exceed capacity of tree representation - assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4); - - m_trees = NULL; - m_num_levels = 0; - - m_effective_assoc = 1; - while (m_effective_assoc < assoc) { - // effective associativity is ceiling power of 2 - m_effective_assoc <<= 1; - } - assoc = m_effective_assoc; - while (true) { - assoc /= 2; - if(!assoc) break; - m_num_levels++; - } - assert(m_num_levels < sizeof(unsigned int)*4); - m_trees = new uint64[m_num_sets]; - for (unsigned i = 0; i < m_num_sets; i++) { - m_trees[i] = 0; - } -} - -inline -PseudoLRUPolicy::~PseudoLRUPolicy() -{ - if (m_trees != NULL) - delete[] m_trees; -} - -inline void -PseudoLRUPolicy::touch(Index set, Index index, Tick time) -{ - assert(index >= 0 && index < m_assoc); - assert(set >= 0 && set < m_num_sets); - - int tree_index = 0; - int node_val; - for (int i = m_num_levels - 1; i >= 0; i--) { - node_val = (index >> i)&1; - if (node_val) - m_trees[set] |= node_val << tree_index; - else - m_trees[set] &= ~(1 << tree_index); - tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1; - } - m_last_ref_ptr[set][index] = time; -} - -inline Index -PseudoLRUPolicy::getVictim(Index set) const -{ - // assert(m_assoc != 0); - Index index = 0; - - int tree_index = 0; - int node_val; - for (unsigned i = 0; i < m_num_levels; i++){ - node_val = (m_trees[set] >> tree_index) & 1; - index += node_val ? 
0 : (m_effective_assoc >> (i + 1)); - tree_index = node_val ? (tree_index * 2) + 1 : (tree_index * 2) + 2; - } - assert(index >= 0 && index < m_effective_assoc); - - /* return either the found index or the max possible index */ - /* NOTE: this is not a fair replacement when assoc is not a power of 2 */ - return (index > (m_assoc - 1)) ? m_assoc - 1 : index; -} - -#endif // __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__ diff --git a/src/mem/ruby/system/RubyMemoryControl.cc b/src/mem/ruby/system/RubyMemoryControl.cc deleted file mode 100644 index a13d3cd3b..000000000 --- a/src/mem/ruby/system/RubyMemoryControl.cc +++ /dev/null @@ -1,791 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * Copyright (c) 2012 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Description: This module simulates a basic DDR-style memory controller - * (and can easily be extended to do FB-DIMM as well). - * - * This module models a single channel, connected to any number of - * DIMMs with any number of ranks of DRAMs each. If you want multiple - * address/data channels, you need to instantiate multiple copies of - * this module. - * - * Each memory request is placed in a queue associated with a specific - * memory bank. This queue is of finite size; if the queue is full - * the request will back up in an (infinite) common queue and will - * effectively throttle the whole system. This sort of behavior is - * intended to be closer to real system behavior than if we had an - * infinite queue on each bank. If you want the latter, just make - * the bank queues unreasonably large. - * - * The head item on a bank queue is issued when all of the - * following are true: - * the bank is available - * the address path to the DIMM is available - * the data path to or from the DIMM is available - * - * Note that we are not concerned about fixed offsets in time. 
The bank - * will not be used at the same moment as the address path, but since - * there is no queue in the DIMM or the DRAM it will be used at a constant - * number of cycles later, so it is treated as if it is used at the same - * time. - * - * We are assuming closed bank policy; that is, we automatically close - * each bank after a single read or write. Adding an option for open - * bank policy is for future work. - * - * We are assuming "posted CAS"; that is, we send the READ or WRITE - * immediately after the ACTIVATE. This makes scheduling the address - * bus trivial; we always schedule a fixed set of cycles. For DDR-400, - * this is a set of two cycles; for some configurations such as - * DDR-800 the parameter tRRD forces this to be set to three cycles. - * - * We assume a four-bit-time transfer on the data wires. This is - * the minimum burst length for DDR-2. This would correspond - * to (for example) a memory where each DIMM is 72 bits wide - * and DIMMs are ganged in pairs to deliver 64 bytes at a shot. - * This gives us the same occupancy on the data wires as on the - * address wires (for the two-address-cycle case). - * - * The only non-trivial scheduling problem is the data wires. - * A write will use the wires earlier in the operation than a read - * will; typically one cycle earlier as seen at the DRAM, but earlier - * by a worst-case round-trip wire delay when seen at the memory controller. - * So, while reads from one rank can be scheduled back-to-back - * every two cycles, and writes (to any rank) scheduled every two cycles, - * when a read is followed by a write we need to insert a bubble. - * Furthermore, consecutive reads from two different ranks may need - * to insert a bubble due to skew between when one DRAM stops driving the - * wires and when the other one starts. (These bubbles are parameters.) - * - * This means that when some number of reads and writes are at the - * heads of their queues, reads could starve writes, and/or reads - * to the same rank could starve out other requests, since the others - * would never see the data bus ready. - * For this reason, we have implemented an anti-starvation feature. - * A group of requests is marked "old", and a counter is incremented - * each cycle as long as any request from that batch has not issued. - * if the counter reaches twice the bank busy time, we hold off any - * newer requests until all of the "old" requests have issued. - * - * We also model tFAW. This is an obscure DRAM parameter that says - * that no more than four activate requests can happen within a window - * of a certain size. For most configurations this does not come into play, - * or has very little effect, but it could be used to throttle the power - * consumption of the DRAM. In this implementation (unlike in a DRAM - * data sheet) TFAW is measured in memory bus cycles; i.e. if TFAW = 16 - * then no more than four activates may happen within any 16 cycle window. - * Refreshes are included in the activates. - * - */ - -#include "base/cast.hh" -#include "base/cprintf.hh" -#include "debug/RubyMemory.hh" -#include "mem/ruby/common/Address.hh" -#include "mem/ruby/common/Global.hh" -#include "mem/ruby/profiler/Profiler.hh" -#include "mem/ruby/slicc_interface/NetworkMessage.hh" -#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh" -#include "mem/ruby/system/RubyMemoryControl.hh" -#include "mem/ruby/system/System.hh" - -using namespace std; - -// Value to reset watchdog timer to. 
-// If we're idle for this many memory control cycles, -// shut down our clock (our rescheduling of ourselves). -// Refresh shuts down as well. -// When we restart, we'll be in a different phase -// with respect to ruby cycles, so this introduces -// a slight inaccuracy. But it is necessary or the -// ruby tester never terminates because the event -// queue is never empty. -#define IDLECOUNT_MAX_VALUE 1000 - -// Output operator definition - -ostream& -operator<<(ostream& out, const RubyMemoryControl& obj) -{ - obj.print(out); - out << flush; - return out; -} - - -// **************************************************************** - -// CONSTRUCTOR -RubyMemoryControl::RubyMemoryControl(const Params *p) - : MemoryControl(p) -{ - m_banks_per_rank = p->banks_per_rank; - m_ranks_per_dimm = p->ranks_per_dimm; - m_dimms_per_channel = p->dimms_per_channel; - m_bank_bit_0 = p->bank_bit_0; - m_rank_bit_0 = p->rank_bit_0; - m_dimm_bit_0 = p->dimm_bit_0; - m_bank_queue_size = p->bank_queue_size; - m_bank_busy_time = p->bank_busy_time; - m_rank_rank_delay = p->rank_rank_delay; - m_read_write_delay = p->read_write_delay; - m_basic_bus_busy_time = p->basic_bus_busy_time; - m_mem_ctl_latency = p->mem_ctl_latency; - m_refresh_period = p->refresh_period; - m_tFaw = p->tFaw; - m_mem_random_arbitrate = p->mem_random_arbitrate; - m_mem_fixed_delay = p->mem_fixed_delay; - - m_profiler_ptr = new MemCntrlProfiler(name(), - m_banks_per_rank, - m_ranks_per_dimm, - m_dimms_per_channel); -} - -void -RubyMemoryControl::init() -{ - m_msg_counter = 0; - - assert(m_tFaw <= 62); // must fit in a uint64 shift register - - m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel; - m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel; - m_refresh_period_system = m_refresh_period / m_total_banks; - - m_bankQueues = new list [m_total_banks]; - assert(m_bankQueues); - - m_bankBusyCounter = new int [m_total_banks]; - assert(m_bankBusyCounter); - - m_oldRequest = new int [m_total_banks]; - assert(m_oldRequest); - - for (int i = 0; i < m_total_banks; i++) { - m_bankBusyCounter[i] = 0; - m_oldRequest[i] = 0; - } - - m_busBusyCounter_Basic = 0; - m_busBusyCounter_Write = 0; - m_busBusyCounter_ReadNewRank = 0; - m_busBusy_WhichRank = 0; - - m_roundRobin = 0; - m_refresh_count = 1; - m_need_refresh = 0; - m_refresh_bank = 0; - m_idleCount = 0; - m_ageCounter = 0; - - // Each tfaw shift register keeps a moving bit pattern - // which shows when recent activates have occurred. - // m_tfaw_count keeps track of how many 1 bits are set - // in each shift register. When m_tfaw_count is >= 4, - // new activates are not allowed. 
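As a standalone illustration of the tFAW bookkeeping just described (not part of this patch), the per-rank shift register and counter can be sketched as a small class; the member names and the tFaw value of 16 cycles are simplified stand-ins for the fields used in this file, and the limit of four activates mirrors ACTIVATE_PER_TFAW.

#include <cstdint>

class TfawWindow
{
  public:
    explicit TfawWindow(int tFaw) : m_tFaw(tFaw) {}

    // Record an activate (as markTfaw does): set the bit that will reach
    // position 0 exactly tFaw bus cycles from now.
    void markActivate()
    {
        if (m_tFaw) {
            m_shift |= (1ULL << (m_tFaw - 1));
            m_count++;
        }
    }

    // Advance one memory-bus cycle (mirrors the per-cycle countdown in
    // executeCycle): an activate that has aged past tFaw falls out.
    void tick()
    {
        if (m_shift & 1)
            m_count--;
        m_shift >>= 1;
    }

    // queueReady/issueRefresh consult this before issuing an activate.
    bool canActivate() const { return m_count < 4; }  // ACTIVATE_PER_TFAW

  private:
    int m_tFaw;            // window size in memory-bus cycles (<= 62)
    uint64_t m_shift = 0;  // moving bit pattern of recent activates
    int m_count = 0;       // number of 1 bits currently in m_shift
};

int main()
{
    TfawWindow rank0(16);                 // hypothetical tFaw of 16 cycles
    for (int i = 0; i < 4; i++) {
        rank0.markActivate();             // one activate per cycle
        rank0.tick();
    }
    bool blocked = !rank0.canActivate();  // a fifth activate must wait
    for (int i = 0; i < 16; i++)
        rank0.tick();                     // the window slides past all four
    return (blocked && rank0.canActivate()) ? 0 : 1;
}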
- m_tfaw_shift = new uint64[m_total_ranks]; - m_tfaw_count = new int[m_total_ranks]; - for (int i = 0; i < m_total_ranks; i++) { - m_tfaw_shift[i] = 0; - m_tfaw_count[i] = 0; - } -} - -void -RubyMemoryControl::reset() -{ - m_msg_counter = 0; - - assert(m_tFaw <= 62); // must fit in a uint64 shift register - - m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel; - m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel; - m_refresh_period_system = m_refresh_period / m_total_banks; - - assert(m_bankQueues); - - assert(m_bankBusyCounter); - - assert(m_oldRequest); - - for (int i = 0; i < m_total_banks; i++) { - m_bankBusyCounter[i] = 0; - m_oldRequest[i] = 0; - } - - m_busBusyCounter_Basic = 0; - m_busBusyCounter_Write = 0; - m_busBusyCounter_ReadNewRank = 0; - m_busBusy_WhichRank = 0; - - m_roundRobin = 0; - m_refresh_count = 1; - m_need_refresh = 0; - m_refresh_bank = 0; - m_idleCount = 0; - m_ageCounter = 0; - - // Each tfaw shift register keeps a moving bit pattern - // which shows when recent activates have occurred. - // m_tfaw_count keeps track of how many 1 bits are set - // in each shift register. When m_tfaw_count is >= 4, - // new activates are not allowed. - for (int i = 0; i < m_total_ranks; i++) { - m_tfaw_shift[i] = 0; - m_tfaw_count[i] = 0; - } -} - -RubyMemoryControl::~RubyMemoryControl() -{ - delete [] m_bankQueues; - delete [] m_bankBusyCounter; - delete [] m_oldRequest; - delete m_profiler_ptr; -} - -// enqueue new request from directory -void -RubyMemoryControl::enqueue(const MsgPtr& message, Cycles latency) -{ - Cycles arrival_time = curCycle() + latency; - const MemoryMsg* memMess = safe_cast(message.get()); - physical_address_t addr = memMess->getAddr().getAddress(); - MemoryRequestType type = memMess->getType(); - bool is_mem_read = (type == MemoryRequestType_MEMORY_READ); - MemoryNode *thisReq = new MemoryNode(arrival_time, message, addr, - is_mem_read, !is_mem_read); - enqueueMemRef(thisReq); -} - -// Alternate entry point used when we already have a MemoryNode -// structure built. -void -RubyMemoryControl::enqueueMemRef(MemoryNode *memRef) -{ - m_msg_counter++; - memRef->m_msg_counter = m_msg_counter; - physical_address_t addr = memRef->m_addr; - int bank = getBank(addr); - - DPRINTF(RubyMemory, - "New memory request%7d: %#08x %c arrived at %10d bank = %3x sched %c\n", - m_msg_counter, addr, memRef->m_is_mem_read ? 'R':'W', - memRef->m_time * g_system_ptr->clockPeriod(), - bank, m_event.scheduled() ? 'Y':'N'); - - m_profiler_ptr->profileMemReq(bank); - m_input_queue.push_back(memRef); - - if (!m_event.scheduled()) { - schedule(m_event, clockEdge()); - } -} - -// dequeue, peek, and isReady are used to transfer completed requests -// back to the directory -void -RubyMemoryControl::dequeue() -{ - assert(isReady()); - MemoryNode *req = m_response_queue.front(); - m_response_queue.pop_front(); - delete req; -} - -const Message* -RubyMemoryControl::peek() -{ - MemoryNode *node = peekNode(); - Message* msg_ptr = node->m_msgptr.get(); - assert(msg_ptr != NULL); - return msg_ptr; -} - -MemoryNode * -RubyMemoryControl::peekNode() -{ - assert(isReady()); - MemoryNode *req = m_response_queue.front(); - DPRINTF(RubyMemory, "Peek: memory request%7d: %#08x %c sched %c\n", - req->m_msg_counter, req->m_addr, req->m_is_mem_read ? 'R':'W', - m_event.scheduled() ? 
'Y':'N'); - - return req; -} - -bool -RubyMemoryControl::isReady() -{ - return ((!m_response_queue.empty()) && - (m_response_queue.front()->m_time <= g_system_ptr->curCycle())); -} - -void -RubyMemoryControl::setConsumer(Consumer* consumer_ptr) -{ - m_consumer_ptr = consumer_ptr; -} - -void -RubyMemoryControl::print(ostream& out) const -{ -} - -// Queue up a completed request to send back to directory -void -RubyMemoryControl::enqueueToDirectory(MemoryNode *req, Cycles latency) -{ - Tick arrival_time = clockEdge(latency); - Cycles ruby_arrival_time = g_system_ptr->ticksToCycles(arrival_time); - req->m_time = ruby_arrival_time; - m_response_queue.push_back(req); - - DPRINTF(RubyMemory, "Enqueueing msg %#08x %c back to directory at %15d\n", - req->m_addr, req->m_is_mem_read ? 'R':'W', arrival_time); - - // schedule the wake up - m_consumer_ptr->scheduleEventAbsolute(arrival_time); -} - -// getBank returns an integer that is unique for each -// bank across this memory controller. -const int -RubyMemoryControl::getBank(const physical_address_t addr) const -{ - int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1); - int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1); - int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1); - return (dimm * m_ranks_per_dimm * m_banks_per_rank) - + (rank * m_banks_per_rank) - + bank; -} - -const int -RubyMemoryControl::getRank(const physical_address_t addr) const -{ - int bank = getBank(addr); - int rank = (bank / m_banks_per_rank); - assert (rank < (m_ranks_per_dimm * m_dimms_per_channel)); - return rank; -} - -// getRank returns an integer that is unique for each rank -// and independent of individual bank. -const int -RubyMemoryControl::getRank(int bank) const -{ - int rank = (bank / m_banks_per_rank); - assert (rank < (m_ranks_per_dimm * m_dimms_per_channel)); - return rank; -} - -// Not used! -const int -RubyMemoryControl::getChannel(const physical_address_t addr) const -{ - assert(false); - return -1; -} - -// Not used! -const int -RubyMemoryControl::getRow(const physical_address_t addr) const -{ - assert(false); - return -1; -} - -// queueReady determines if the head item in a bank queue -// can be issued this cycle -bool -RubyMemoryControl::queueReady(int bank) -{ - if ((m_bankBusyCounter[bank] > 0) && !m_mem_fixed_delay) { - m_profiler_ptr->profileMemBankBusy(); - - DPRINTF(RubyMemory, "bank %x busy %d\n", bank, m_bankBusyCounter[bank]); - return false; - } - - if (m_mem_random_arbitrate >= 2) { - if ((random() % 100) < m_mem_random_arbitrate) { - m_profiler_ptr->profileMemRandBusy(); - return false; - } - } - - if (m_mem_fixed_delay) - return true; - - if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) { - m_profiler_ptr->profileMemNotOld(); - return false; - } - - if (m_busBusyCounter_Basic == m_basic_bus_busy_time) { - // Another bank must have issued this same cycle. For - // profiling, we count this as an arb wait rather than a bus - // wait. This is a little inaccurate since it MIGHT have also - // been blocked waiting for a read-write or a read-read - // instead, but it's pretty close. 
- m_profiler_ptr->profileMemArbWait(1); - return false; - } - - if (m_busBusyCounter_Basic > 0) { - m_profiler_ptr->profileMemBusBusy(); - return false; - } - - int rank = getRank(bank); - if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) { - m_profiler_ptr->profileMemTfawBusy(); - return false; - } - - bool write = !m_bankQueues[bank].front()->m_is_mem_read; - if (write && (m_busBusyCounter_Write > 0)) { - m_profiler_ptr->profileMemReadWriteBusy(); - return false; - } - - if (!write && (rank != m_busBusy_WhichRank) - && (m_busBusyCounter_ReadNewRank > 0)) { - m_profiler_ptr->profileMemDataBusBusy(); - return false; - } - - return true; -} - -// issueRefresh checks to see if this bank has a refresh scheduled -// and, if so, does the refresh and returns true -bool -RubyMemoryControl::issueRefresh(int bank) -{ - if (!m_need_refresh || (m_refresh_bank != bank)) - return false; - if (m_bankBusyCounter[bank] > 0) - return false; - // Note that m_busBusyCounter will prevent multiple issues during - // the same cycle, as well as on different but close cycles: - if (m_busBusyCounter_Basic > 0) - return false; - int rank = getRank(bank); - if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) - return false; - - // Issue it: - DPRINTF(RubyMemory, "Refresh bank %3x\n", bank); - - m_profiler_ptr->profileMemRefresh(); - m_need_refresh--; - m_refresh_bank++; - if (m_refresh_bank >= m_total_banks) - m_refresh_bank = 0; - m_bankBusyCounter[bank] = m_bank_busy_time; - m_busBusyCounter_Basic = m_basic_bus_busy_time; - m_busBusyCounter_Write = m_basic_bus_busy_time; - m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time; - markTfaw(rank); - return true; -} - -// Mark the activate in the tFaw shift register -void -RubyMemoryControl::markTfaw(int rank) -{ - if (m_tFaw) { - m_tfaw_shift[rank] |= (1 << (m_tFaw-1)); - m_tfaw_count[rank]++; - } -} - -// Issue a memory request: Activate the bank, reserve the address and -// data buses, and queue the request for return to the requesting -// processor after a fixed latency. -void -RubyMemoryControl::issueRequest(int bank) -{ - int rank = getRank(bank); - MemoryNode *req = m_bankQueues[bank].front(); - m_bankQueues[bank].pop_front(); - - DPRINTF(RubyMemory, "Mem issue request%7d: %#08x %c " - "bank=%3x sched %c\n", req->m_msg_counter, req->m_addr, - req->m_is_mem_read? 'R':'W', - bank, m_event.scheduled() ? 'Y':'N'); - - if (req->m_msgptr) { // don't enqueue L3 writebacks - enqueueToDirectory(req, Cycles(m_mem_ctl_latency + m_mem_fixed_delay)); - } - m_oldRequest[bank] = 0; - markTfaw(rank); - m_bankBusyCounter[bank] = m_bank_busy_time; - m_busBusy_WhichRank = rank; - if (req->m_is_mem_read) { - m_profiler_ptr->profileMemRead(); - m_busBusyCounter_Basic = m_basic_bus_busy_time; - m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay; - m_busBusyCounter_ReadNewRank = - m_basic_bus_busy_time + m_rank_rank_delay; - } else { - m_profiler_ptr->profileMemWrite(); - m_busBusyCounter_Basic = m_basic_bus_busy_time; - m_busBusyCounter_Write = m_basic_bus_busy_time; - m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time; - } -} - -// executeCycle: This function is called once per memory clock cycle -// to simulate all the periodic hardware. 
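As a standalone worked example (not part of this patch) of the bank selection these routines rely on, getBank() above maps a physical address to a queue index as follows, using the default parameters from RubyMemoryControl.py (8 banks per rank, 2 ranks per DIMM, 2 DIMMs per channel, bank_bit_0 = 8, rank_bit_0 = 11, dimm_bit_0 = 12); the address itself is an arbitrary example value.

#include <cassert>
#include <cstdint>

int main()
{
    const int banks_per_rank = 8, ranks_per_dimm = 2, dimms_per_channel = 2;
    const int bank_bit_0 = 8, rank_bit_0 = 11, dimm_bit_0 = 12;

    uint64_t addr = 0x12345;  // hypothetical physical address
    int dimm = (addr >> dimm_bit_0) & (dimms_per_channel - 1);  // 0
    int rank = (addr >> rank_bit_0) & (ranks_per_dimm - 1);     // 0
    int bank = (addr >> bank_bit_0) & (banks_per_rank - 1);     // 3

    // Flatten into a single index that is unique across the controller's
    // 8 * 2 * 2 = 32 banks, exactly as getBank() does.
    int index = (dimm * ranks_per_dimm * banks_per_rank)
              + (rank * banks_per_rank) + bank;
    assert(index == 3);
    return 0;
}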
-void -RubyMemoryControl::executeCycle() -{ - // Keep track of time by counting down the busy counters: - for (int bank=0; bank < m_total_banks; bank++) { - if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--; - } - if (m_busBusyCounter_Write > 0) - m_busBusyCounter_Write--; - if (m_busBusyCounter_ReadNewRank > 0) - m_busBusyCounter_ReadNewRank--; - if (m_busBusyCounter_Basic > 0) - m_busBusyCounter_Basic--; - - // Count down the tFAW shift registers: - for (int rank=0; rank < m_total_ranks; rank++) { - if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--; - m_tfaw_shift[rank] >>= 1; - } - - // After time period expires, latch an indication that we need a refresh. - // Disable refresh if in mem_fixed_delay mode. - if (!m_mem_fixed_delay) m_refresh_count--; - if (m_refresh_count == 0) { - m_refresh_count = m_refresh_period_system; - - // Are we overrunning our ability to refresh? - assert(m_need_refresh < 10); - m_need_refresh++; - } - - // If this batch of requests is all done, make a new batch: - m_ageCounter++; - int anyOld = 0; - for (int bank=0; bank < m_total_banks; bank++) { - anyOld |= m_oldRequest[bank]; - } - if (!anyOld) { - for (int bank=0; bank < m_total_banks; bank++) { - if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1; - } - m_ageCounter = 0; - } - - // If randomness desired, re-randomize round-robin position each cycle - if (m_mem_random_arbitrate) { - m_roundRobin = random() % m_total_banks; - } - - // For each channel, scan round-robin, and pick an old, ready - // request and issue it. Treat a refresh request as if it were at - // the head of its bank queue. After we issue something, keep - // scanning the queues just to gather statistics about how many - // are waiting. If in mem_fixed_delay mode, we can issue more - // than one request per cycle. - int queueHeads = 0; - int banksIssued = 0; - for (int i = 0; i < m_total_banks; i++) { - m_roundRobin++; - if (m_roundRobin >= m_total_banks) m_roundRobin = 0; - issueRefresh(m_roundRobin); - int qs = m_bankQueues[m_roundRobin].size(); - if (qs > 1) { - m_profiler_ptr->profileMemBankQ(qs-1); - } - if (qs > 0) { - // we're not idle if anything is queued - m_idleCount = IDLECOUNT_MAX_VALUE; - queueHeads++; - if (queueReady(m_roundRobin)) { - issueRequest(m_roundRobin); - banksIssued++; - if (m_mem_fixed_delay) { - m_profiler_ptr->profileMemWaitCycles(m_mem_fixed_delay); - } - } - } - } - - // memWaitCycles is a redundant catch-all for the specific - // counters in queueReady - m_profiler_ptr->profileMemWaitCycles(queueHeads - banksIssued); - - // Check input queue and move anything to bank queues if not full. - // Since this is done here at the end of the cycle, there will - // always be at least one cycle of latency in the bank queue. We - // deliberately move at most one request per cycle (to simulate - // typical hardware). Note that if one bank queue fills up, other - // requests can get stuck behind it here. 
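As an aside (not part of this patch), the idle watchdog mentioned with IDLECOUNT_MAX_VALUE above works together with wakeup() below: the controller reschedules itself every cycle and stops once it has seen no work for that many cycles, so the event queue can drain. A minimal sketch of just the counter behaviour, with hypothetical local names:

#include <cassert>

int main()
{
    const int IDLE_MAX = 1000;       // mirrors IDLECOUNT_MAX_VALUE
    int idleCount = IDLE_MAX;        // reset whenever any request is pending
    bool scheduled = true;
    int cycles_until_stop = 0;

    // With no work arriving, the controller keeps waking itself up until
    // the watchdog expires, then stops rescheduling itself.
    while (scheduled) {
        ++cycles_until_stop;
        --idleCount;                  // executeCycle() found nothing queued
        scheduled = (idleCount > 0);  // wakeup() only reschedules if > 0
    }
    assert(cycles_until_stop == IDLE_MAX);
    return 0;
}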
- if (!m_input_queue.empty()) { - // we're not idle if anything is pending - m_idleCount = IDLECOUNT_MAX_VALUE; - MemoryNode *req = m_input_queue.front(); - int bank = getBank(req->m_addr); - if (m_bankQueues[bank].size() < m_bank_queue_size) { - m_input_queue.pop_front(); - m_bankQueues[bank].push_back(req); - } - m_profiler_ptr->profileMemInputQ(m_input_queue.size()); - } -} - -unsigned int -RubyMemoryControl::drain(DrainManager *dm) -{ - DPRINTF(RubyMemory, "MemoryController drain\n"); - if(m_event.scheduled()) { - deschedule(m_event); - } - return 0; -} - -// wakeup: This function is called once per memory controller clock cycle. -void -RubyMemoryControl::wakeup() -{ - DPRINTF(RubyMemory, "MemoryController wakeup\n"); - // execute everything - executeCycle(); - - m_idleCount--; - if (m_idleCount > 0) { - assert(!m_event.scheduled()); - schedule(m_event, clockEdge(Cycles(1))); - } -} - -/** - * This function reads the different buffers that exist in the Ruby Memory - * Controller, and figures out if any of the buffers hold a message that - * contains the data for the address provided in the packet. True is returned - * if any of the messages was read, otherwise false is returned. - * - * I think we should move these buffers to being message buffers, instead of - * being lists. - */ -bool -RubyMemoryControl::functionalReadBuffers(Packet *pkt) -{ - for (std::list::iterator it = m_input_queue.begin(); - it != m_input_queue.end(); ++it) { - Message* msg_ptr = (*it)->m_msgptr.get(); - if (msg_ptr->functionalRead(pkt)) { - return true; - } - } - - for (std::list::iterator it = m_response_queue.begin(); - it != m_response_queue.end(); ++it) { - Message* msg_ptr = (*it)->m_msgptr.get(); - if (msg_ptr->functionalRead(pkt)) { - return true; - } - } - - for (uint32_t bank = 0; bank < m_total_banks; ++bank) { - for (std::list::iterator it = m_bankQueues[bank].begin(); - it != m_bankQueues[bank].end(); ++it) { - Message* msg_ptr = (*it)->m_msgptr.get(); - if (msg_ptr->functionalRead(pkt)) { - return true; - } - } - } - - return false; -} - -/** - * This function reads the different buffers that exist in the Ruby Memory - * Controller, and figures out if any of the buffers hold a message that - * needs to functionally written with the data in the packet. - * - * The number of messages written is returned at the end. This is required - * for debugging purposes. 
- */ -uint32_t -RubyMemoryControl::functionalWriteBuffers(Packet *pkt) -{ - uint32_t num_functional_writes = 0; - - for (std::list::iterator it = m_input_queue.begin(); - it != m_input_queue.end(); ++it) { - Message* msg_ptr = (*it)->m_msgptr.get(); - if (msg_ptr->functionalWrite(pkt)) { - num_functional_writes++; - } - } - - for (std::list::iterator it = m_response_queue.begin(); - it != m_response_queue.end(); ++it) { - Message* msg_ptr = (*it)->m_msgptr.get(); - if (msg_ptr->functionalWrite(pkt)) { - num_functional_writes++; - } - } - - for (uint32_t bank = 0; bank < m_total_banks; ++bank) { - for (std::list::iterator it = m_bankQueues[bank].begin(); - it != m_bankQueues[bank].end(); ++it) { - Message* msg_ptr = (*it)->m_msgptr.get(); - if (msg_ptr->functionalWrite(pkt)) { - num_functional_writes++; - } - } - } - - return num_functional_writes; -} - -void -RubyMemoryControl::regStats() -{ - m_profiler_ptr->regStats(); -} - -RubyMemoryControl * -RubyMemoryControlParams::create() -{ - return new RubyMemoryControl(this); -} diff --git a/src/mem/ruby/system/RubyMemoryControl.hh b/src/mem/ruby/system/RubyMemoryControl.hh deleted file mode 100644 index 042078db1..000000000 --- a/src/mem/ruby/system/RubyMemoryControl.hh +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * Copyright (c) 2012 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__ -#define __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__ - -#include -#include -#include - -#include "mem/protocol/MemoryMsg.hh" -#include "mem/ruby/common/Address.hh" -#include "mem/ruby/common/Consumer.hh" -#include "mem/ruby/common/Global.hh" -#include "mem/ruby/profiler/MemCntrlProfiler.hh" -#include "mem/ruby/slicc_interface/Message.hh" -#include "mem/ruby/system/MemoryControl.hh" -#include "mem/ruby/system/MemoryNode.hh" -#include "mem/ruby/system/System.hh" -#include "params/RubyMemoryControl.hh" -#include "sim/sim_object.hh" - -// This constant is part of the definition of tFAW; see -// the comments in header to RubyMemoryControl.cc -#define ACTIVATE_PER_TFAW 4 - -////////////////////////////////////////////////////////////////////////////// - -class RubyMemoryControl : public MemoryControl -{ - public: - typedef RubyMemoryControlParams Params; - RubyMemoryControl(const Params *p); - void init(); - void reset(); - - ~RubyMemoryControl(); - - unsigned int drain(DrainManager *dm); - - void wakeup(); - - void setConsumer(Consumer* consumer_ptr); - Consumer* getConsumer() { return m_consumer_ptr; }; - void setDescription(const std::string& name) { m_description = name; }; - std::string getDescription() { return m_description; }; - - // Called from the directory: - void enqueue(const MsgPtr& message, Cycles latency); - void enqueueMemRef(MemoryNode *memRef); - void dequeue(); - const Message* peek(); - MemoryNode *peekNode(); - bool isReady(); - bool areNSlotsAvailable(int n) { return true; }; // infinite queue length - - void print(std::ostream& out) const; - void regStats(); - - const int getBank(const physical_address_t addr) const; - const int getRank(const physical_address_t addr) const; - - // not used in Ruby memory controller - const int getChannel(const physical_address_t addr) const; - const int getRow(const physical_address_t addr) const; - - //added by SS - int getBanksPerRank() { return m_banks_per_rank; }; - int getRanksPerDimm() { return m_ranks_per_dimm; }; - int getDimmsPerChannel() { return m_dimms_per_channel; } - - bool functionalReadBuffers(Packet *pkt); - uint32_t functionalWriteBuffers(Packet *pkt); - - private: - void enqueueToDirectory(MemoryNode *req, Cycles latency); - const int getRank(int bank) const; - bool queueReady(int bank); - void issueRequest(int bank); - bool issueRefresh(int bank); - void markTfaw(int rank); - void executeCycle(); - - // Private copy constructor and assignment operator - RubyMemoryControl (const RubyMemoryControl& obj); - RubyMemoryControl& operator=(const RubyMemoryControl& obj); - - // data members - Consumer* m_consumer_ptr; // Consumer to signal a wakeup() - std::string m_description; - int m_msg_counter; - - int m_banks_per_rank; - int m_ranks_per_dimm; - int m_dimms_per_channel; - int m_bank_bit_0; - int m_rank_bit_0; - int m_dimm_bit_0; - unsigned int m_bank_queue_size; - int m_bank_busy_time; - int m_rank_rank_delay; - int m_read_write_delay; - int m_basic_bus_busy_time; - Cycles m_mem_ctl_latency; - int m_refresh_period; - int m_mem_random_arbitrate; - int m_tFaw; - Cycles m_mem_fixed_delay; - - int m_total_banks; - int m_total_ranks; - int m_refresh_period_system; - - // queues where memory requests live - std::list m_response_queue; - std::list m_input_queue; - std::list* m_bankQueues; - - // Each entry indicates number of address-bus cycles until bank - // is reschedulable: - int* m_bankBusyCounter; - int* m_oldRequest; - - uint64* m_tfaw_shift; - int* m_tfaw_count; - - // Each 
of these indicates number of address-bus cycles until - // we can issue a new request of the corresponding type: - int m_busBusyCounter_Write; - int m_busBusyCounter_ReadNewRank; - int m_busBusyCounter_Basic; - - int m_busBusy_WhichRank; // which rank last granted - int m_roundRobin; // which bank queue was last granted - int m_refresh_count; // cycles until next refresh - int m_need_refresh; // set whenever m_refresh_count goes to zero - int m_refresh_bank; // which bank to refresh next - int m_ageCounter; // age of old requests; to detect starvation - int m_idleCount; // watchdog timer for shutting down - - MemCntrlProfiler* m_profiler_ptr; -}; - -std::ostream& operator<<(std::ostream& out, const RubyMemoryControl& obj); - -#endif // __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__ diff --git a/src/mem/ruby/system/RubyMemoryControl.py b/src/mem/ruby/system/RubyMemoryControl.py deleted file mode 100644 index 118e4f20e..000000000 --- a/src/mem/ruby/system/RubyMemoryControl.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2009 Advanced Micro Devices, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# -# Authors: Steve Reinhardt -# Brad Beckmann - -from m5.params import * -from m5.SimObject import SimObject -from MemoryControl import MemoryControl - -class RubyMemoryControl(MemoryControl): - type = 'RubyMemoryControl' - cxx_class = 'RubyMemoryControl' - cxx_header = "mem/ruby/system/RubyMemoryControl.hh" - version = Param.Int(""); - - banks_per_rank = Param.Int(8, ""); - ranks_per_dimm = Param.Int(2, ""); - dimms_per_channel = Param.Int(2, ""); - bank_bit_0 = Param.Int(8, ""); - rank_bit_0 = Param.Int(11, ""); - dimm_bit_0 = Param.Int(12, ""); - bank_queue_size = Param.Int(12, ""); - bank_busy_time = Param.Int(11, ""); - rank_rank_delay = Param.Int(1, ""); - read_write_delay = Param.Int(2, ""); - basic_bus_busy_time = Param.Int(2, ""); - mem_ctl_latency = Param.Cycles(12, ""); - refresh_period = Param.Cycles(1560, ""); - tFaw = Param.Int(0, ""); - mem_random_arbitrate = Param.Int(0, ""); - mem_fixed_delay = Param.Cycles(0, ""); diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh index fffe6bb97..12e97208f 100644 --- a/src/mem/ruby/system/RubyPort.hh +++ b/src/mem/ruby/system/RubyPort.hh @@ -46,7 +46,7 @@ #include #include "mem/protocol/RequestStatus.hh" -#include "mem/ruby/buffers/MessageBuffer.hh" +#include "mem/ruby/network/MessageBuffer.hh" #include "mem/ruby/system/System.hh" #include "mem/mem_object.hh" #include "mem/tport.hh" diff --git a/src/mem/ruby/system/SConscript b/src/mem/ruby/system/SConscript index 6bb6d2707..55fe11d39 100644 --- a/src/mem/ruby/system/SConscript +++ b/src/mem/ruby/system/SConscript @@ -33,26 +33,12 @@ Import('*') if env['PROTOCOL'] == 'None': Return() -SimObject('Cache.py') SimObject('Sequencer.py') -SimObject('DirectoryMemory.py') -SimObject('MemoryControl.py') -SimObject('WireBuffer.py') SimObject('RubySystem.py') -SimObject('RubyMemoryControl.py') +Source('CacheRecorder.cc') Source('DMASequencer.cc') -Source('DirectoryMemory.cc') -Source('SparseMemory.cc') -Source('CacheMemory.cc') -Source('MemoryControl.cc') -Source('WireBuffer.cc') -Source('RubyMemoryControl.cc') -Source('MemoryNode.cc') -Source('PersistentTable.cc') Source('RubyPort.cc') Source('RubyPortProxy.cc') Source('Sequencer.cc') Source('System.cc') -Source('TimerTable.cc') -Source('BankedArray.cc') diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh index 49fd8b7bb..d5cd17f5f 100644 --- a/src/mem/ruby/system/Sequencer.hh +++ b/src/mem/ruby/system/Sequencer.hh @@ -36,7 +36,7 @@ #include "mem/protocol/RubyRequestType.hh" #include "mem/protocol/SequencerRequestType.hh" #include "mem/ruby/common/Address.hh" -#include "mem/ruby/system/CacheMemory.hh" +#include "mem/ruby/structures/CacheMemory.hh" #include "mem/ruby/system/RubyPort.hh" #include "params/RubySequencer.hh" diff --git a/src/mem/ruby/system/SparseMemory.cc b/src/mem/ruby/system/SparseMemory.cc deleted file mode 100644 index a16e553a3..000000000 --- a/src/mem/ruby/system/SparseMemory.cc +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Copyright (c) 2009 Advanced Micro Devices, Inc. - * Copyright (c) 2012 Mark D. Hill and David A. Wood - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include - -#include "debug/RubyCache.hh" -#include "mem/ruby/system/SparseMemory.hh" -#include "mem/ruby/system/System.hh" - -using namespace std; - -SparseMemory::SparseMemory(int number_of_levels) -{ - int even_level_bits; - int extra; - m_total_number_of_bits = RubySystem::getMemorySizeBits() - - RubySystem::getBlockSizeBits();; - - m_number_of_levels = number_of_levels; - - // - // Create the array that describes the bits per level - // - m_number_of_bits_per_level = new int[m_number_of_levels]; - even_level_bits = m_total_number_of_bits / m_number_of_levels; - extra = m_total_number_of_bits % m_number_of_levels; - for (int level = 0; level < m_number_of_levels; level++) { - if (level < extra) - m_number_of_bits_per_level[level] = even_level_bits + 1; - else - m_number_of_bits_per_level[level] = even_level_bits; - } - m_map_head = new SparseMapType; -} - -SparseMemory::~SparseMemory() -{ - recursivelyRemoveTables(m_map_head, 0); - delete m_map_head; - delete [] m_number_of_bits_per_level; -} - -// Recursively search table hierarchy for the lowest level table. 
-// Delete the lowest table first, the tables above -void -SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel) -{ - SparseMapType::iterator iter; - - for (iter = curTable->begin(); iter != curTable->end(); iter++) { - SparseMemEntry entry = (*iter).second; - - if (curLevel != (m_number_of_levels - 1)) { - // If the not at the last level, analyze those lower level - // tables first, then delete those next tables - SparseMapType* nextTable = (SparseMapType*)(entry); - recursivelyRemoveTables(nextTable, (curLevel + 1)); - delete nextTable; - } else { - // If at the last level, delete the directory entry - delete (AbstractEntry*)(entry); - } - entry = NULL; - } - - // Once all entries have been deleted, erase the entries - curTable->erase(curTable->begin(), curTable->end()); -} - -// tests to see if an address is present in the memory -bool -SparseMemory::exist(const Address& address) const -{ - SparseMapType* curTable = m_map_head; - Address curAddress; - - // Initiallize the high bit to be the total number of bits plus - // the block offset. However the highest bit index is one less - // than this value. - int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); - int lowBit; - assert(address == line_address(address)); - DPRINTF(RubyCache, "address: %s\n", address); - - for (int level = 0; level < m_number_of_levels; level++) { - // Create the appropriate sub address for this level - // Note: that set Address is inclusive of the specified range, - // thus the high bit is one less than the total number of bits - // used to create the address. - lowBit = highBit - m_number_of_bits_per_level[level]; - curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); - - DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, " - "curAddress: %s\n", - level, lowBit, highBit - 1, curAddress); - - // Adjust the highBit value for the next level - highBit -= m_number_of_bits_per_level[level]; - - // If the address is found, move on to the next level. - // Otherwise, return not found - if (curTable->count(curAddress) != 0) { - curTable = (SparseMapType*)((*curTable)[curAddress]); - } else { - DPRINTF(RubyCache, "Not found\n"); - return false; - } - } - - DPRINTF(RubyCache, "Entry found\n"); - return true; -} - -// add an address to memory -void -SparseMemory::add(const Address& address, AbstractEntry* entry) -{ - assert(address == line_address(address)); - assert(!exist(address)); - - m_total_adds++; - - Address curAddress; - SparseMapType* curTable = m_map_head; - - // Initiallize the high bit to be the total number of bits plus - // the block offset. However the highest bit index is one less - // than this value. - int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); - int lowBit; - void* newEntry = NULL; - - for (int level = 0; level < m_number_of_levels; level++) { - // create the appropriate address for this level - // Note: that set Address is inclusive of the specified range, - // thus the high bit is one less than the total number of bits - // used to create the address. - lowBit = highBit - m_number_of_bits_per_level[level]; - curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); - - // Adjust the highBit value for the next level - highBit -= m_number_of_bits_per_level[level]; - - // if the address exists in the cur table, move on. Otherwise - // create a new table. 
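For reference (not part of this patch), the per-level bit allocation computed by the SparseMemory constructor above, which this traversal consumes one level at a time, works out as follows for a hypothetical configuration of 28 usable block-address bits (for example 16 GiB of memory with 64-byte blocks) split over three levels; the extra bit goes to the first level.

#include <cassert>

int main()
{
    int total_bits = 28, levels = 3;
    int even = total_bits / levels;   // 9
    int extra = total_bits % levels;  // 1
    int bits[3];
    for (int level = 0; level < levels; level++)
        bits[level] = (level < extra) ? even + 1 : even;  // {10, 9, 9}
    assert(bits[0] == 10 && bits[1] == 9 && bits[2] == 9);
    return 0;
}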
- if (curTable->count(curAddress) != 0) { - curTable = (SparseMapType*)((*curTable)[curAddress]); - } else { - m_adds_per_level[level]++; - - // if the last level, add a directory entry. Otherwise add a map. - if (level == (m_number_of_levels - 1)) { - entry->getDataBlk().clear(); - newEntry = (void*)entry; - } else { - SparseMapType* tempMap = new SparseMapType; - newEntry = (void*)(tempMap); - } - - // Create the pointer container SparseMemEntry and add it - // to the table. - (*curTable)[curAddress] = newEntry; - - // Move to the next level of the heirarchy - curTable = (SparseMapType*)newEntry; - } - } - - assert(exist(address)); - return; -} - -// recursively search table hierarchy for the lowest level table. -// remove the lowest entry and any empty tables above it. -int -SparseMemory::recursivelyRemoveLevels(const Address& address, - CurNextInfo& curInfo) -{ - Address curAddress; - CurNextInfo nextInfo; - SparseMemEntry entry; - - // create the appropriate address for this level - // Note: that set Address is inclusive of the specified range, - // thus the high bit is one less than the total number of bits - // used to create the address. - curAddress.setAddress(address.bitSelect(curInfo.lowBit, - curInfo.highBit - 1)); - - DPRINTF(RubyCache, "address: %s, curInfo.level: %d, curInfo.lowBit: %d, " - "curInfo.highBit - 1: %d, curAddress: %s\n", - address, curInfo.level, curInfo.lowBit, - curInfo.highBit - 1, curAddress); - - assert(curInfo.curTable->count(curAddress) != 0); - - entry = (*(curInfo.curTable))[curAddress]; - - if (curInfo.level < (m_number_of_levels - 1)) { - // set up next level's info - nextInfo.curTable = (SparseMapType*)(entry); - nextInfo.level = curInfo.level + 1; - - nextInfo.highBit = curInfo.highBit - - m_number_of_bits_per_level[curInfo.level]; - - nextInfo.lowBit = curInfo.lowBit - - m_number_of_bits_per_level[curInfo.level + 1]; - - // recursively search the table hierarchy - int tableSize = recursivelyRemoveLevels(address, nextInfo); - - // If this table below is now empty, we must delete it and - // erase it from our table. - if (tableSize == 0) { - m_removes_per_level[curInfo.level]++; - delete nextInfo.curTable; - entry = NULL; - curInfo.curTable->erase(curAddress); - } - } else { - // if this is the last level, we have reached the Directory - // Entry and thus we should delete it including the - // SparseMemEntry container struct. - delete (AbstractEntry*)(entry); - entry = NULL; - curInfo.curTable->erase(curAddress); - m_removes_per_level[curInfo.level]++; - } - return curInfo.curTable->size(); -} - -// remove an entry from the table -void -SparseMemory::remove(const Address& address) -{ - assert(address == line_address(address)); - assert(exist(address)); - - m_total_removes++; - - CurNextInfo nextInfo; - - // Initialize table pointer and level value - nextInfo.curTable = m_map_head; - nextInfo.level = 0; - - // Initiallize the high bit to be the total number of bits plus - // the block offset. However the highest bit index is one less - // than this value. - nextInfo.highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); - nextInfo.lowBit = nextInfo.highBit - m_number_of_bits_per_level[0];; - - // recursively search the table hierarchy for empty tables - // starting from the level 0. 
Note we do not check the return - // value because the head table is never deleted; - recursivelyRemoveLevels(address, nextInfo); - - assert(!exist(address)); - return; -} - -// looks an address up in memory -AbstractEntry* -SparseMemory::lookup(const Address& address) -{ - assert(address == line_address(address)); - - Address curAddress; - SparseMapType* curTable = m_map_head; - AbstractEntry* entry = NULL; - - // Initiallize the high bit to be the total number of bits plus - // the block offset. However the highest bit index is one less - // than this value. - int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); - int lowBit; - - for (int level = 0; level < m_number_of_levels; level++) { - // create the appropriate address for this level - // Note: that set Address is inclusive of the specified range, - // thus the high bit is one less than the total number of bits - // used to create the address. - lowBit = highBit - m_number_of_bits_per_level[level]; - curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); - - DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, " - "curAddress: %s\n", - level, lowBit, highBit - 1, curAddress); - - // Adjust the highBit value for the next level - highBit -= m_number_of_bits_per_level[level]; - - // If the address is found, move on to the next level. - // Otherwise, return not found - if (curTable->count(curAddress) != 0) { - curTable = (SparseMapType*)((*curTable)[curAddress]); - } else { - DPRINTF(RubyCache, "Not found\n"); - return NULL; - } - } - - // The last entry actually points to the Directory entry not a table - entry = (AbstractEntry*)curTable; - - return entry; -} - -void -SparseMemory::recordBlocks(int cntrl_id, CacheRecorder* tr) const -{ - queue unexplored_nodes[2]; - queue address_of_nodes[2]; - - unexplored_nodes[0].push(m_map_head); - address_of_nodes[0].push(0); - - int parity_of_level = 0; - physical_address_t address, temp_address; - Address curAddress; - - // Initiallize the high bit to be the total number of bits plus - // the block offset. However the highest bit index is one less - // than this value. - int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); - int lowBit; - - for (int cur_level = 0; cur_level < m_number_of_levels; cur_level++) { - - // create the appropriate address for this level - // Note: that set Address is inclusive of the specified range, - // thus the high bit is one less than the total number of bits - // used to create the address. 
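A standalone sketch (not part of this patch) of the bit ranges this walk visits, assuming the same hypothetical split as above ({10, 9, 9} table bits) and a 6-bit block offset: each level selects bits [lowBit, highBit - 1] of the address, and recordBlocks rebuilds the full block address by OR-ing each level's sub-address back in at lowBit. The example address is arbitrary but block aligned.

#include <cassert>
#include <cstdint>

int main()
{
    const int bits_per_level[3] = {10, 9, 9};
    const int block_bits = 6;
    int highBit = 28 + block_bits;          // 34, as in the code above

    uint64_t addr = 0x2ABCD1C0;             // hypothetical block-aligned address
    uint64_t rebuilt = 0;
    for (int level = 0; level < 3; level++) {
        int lowBit = highBit - bits_per_level[level];
        uint64_t sub = (addr >> lowBit) & ((1ULL << bits_per_level[level]) - 1);
        rebuilt |= sub << lowBit;           // what recordBlocks accumulates
        highBit = lowBit;                   // next level looks at lower bits
    }
    assert(rebuilt == addr);                // the three levels cover all tag bits
    return 0;
}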
- lowBit = highBit - m_number_of_bits_per_level[cur_level]; - - while (!unexplored_nodes[parity_of_level].empty()) { - - SparseMapType* node = unexplored_nodes[parity_of_level].front(); - unexplored_nodes[parity_of_level].pop(); - - address = address_of_nodes[parity_of_level].front(); - address_of_nodes[parity_of_level].pop(); - - SparseMapType::iterator iter; - - for (iter = node->begin(); iter != node->end(); iter++) { - SparseMemEntry entry = (*iter).second; - curAddress = (*iter).first; - - if (cur_level != (m_number_of_levels - 1)) { - // If not at the last level, put this node in the queue - unexplored_nodes[1 - parity_of_level].push( - (SparseMapType*)(entry)); - address_of_nodes[1 - parity_of_level].push(address | - (curAddress.getAddress() << lowBit)); - } else { - // If at the last level, add a trace record - temp_address = address | (curAddress.getAddress() - << lowBit); - DataBlock block = ((AbstractEntry*)entry)->getDataBlk(); - tr->addRecord(cntrl_id, temp_address, 0, RubyRequestType_ST, 0, - block); - } - } - } - - // Adjust the highBit value for the next level - highBit -= m_number_of_bits_per_level[cur_level]; - parity_of_level = 1 - parity_of_level; - } -} - -void -SparseMemory::regStats(const string &name) -{ - m_total_adds.name(name + ".total_adds"); - - m_adds_per_level - .init(m_number_of_levels) - .name(name + ".adds_per_level") - .flags(Stats::pdf | Stats::total) - ; - - m_total_removes.name(name + ".total_removes"); - m_removes_per_level - .init(m_number_of_levels) - .name(name + ".removes_per_level") - .flags(Stats::pdf | Stats::total) - ; -} diff --git a/src/mem/ruby/system/SparseMemory.hh b/src/mem/ruby/system/SparseMemory.hh deleted file mode 100644 index 65e0ae8ad..000000000 --- a/src/mem/ruby/system/SparseMemory.hh +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2009 Advanced Micro Devices, Inc. - * Copyright (c) 2012 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__ -#define __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__ - -#include -#include - -#include "base/hashmap.hh" -#include "base/statistics.hh" -#include "mem/ruby/common/Address.hh" -#include "mem/ruby/recorder/CacheRecorder.hh" -#include "mem/ruby/slicc_interface/AbstractEntry.hh" - -typedef void* SparseMemEntry; -typedef m5::hash_map SparseMapType; - -struct CurNextInfo -{ - SparseMapType* curTable; - int level; - int highBit; - int lowBit; -}; - -class SparseMemory -{ - public: - SparseMemory(int number_of_levels); - ~SparseMemory(); - - bool exist(const Address& address) const; - void add(const Address& address, AbstractEntry*); - void remove(const Address& address); - - /*! - * Function for recording the contents of memory. This function walks - * through all the levels of the sparse memory in a breadth first - * fashion. This might need more memory than a depth first approach. - * But breadth first seems easier to me than a depth first approach. - */ - void recordBlocks(int cntrl_id, CacheRecorder *) const; - - AbstractEntry* lookup(const Address& address); - void regStats(const std::string &name); - - private: - // Private copy constructor and assignment operator - SparseMemory(const SparseMemory& obj); - SparseMemory& operator=(const SparseMemory& obj); - - // Used by destructor to recursively remove all tables - void recursivelyRemoveTables(SparseMapType* currentTable, int level); - - // recursive search for address and remove associated entries - int recursivelyRemoveLevels(const Address& address, CurNextInfo& curInfo); - - // Data Members (m_prefix) - SparseMapType* m_map_head; - - int m_total_number_of_bits; - int m_number_of_levels; - int* m_number_of_bits_per_level; - - Stats::Scalar m_total_adds; - Stats::Vector m_adds_per_level; - Stats::Scalar m_total_removes; - Stats::Vector m_removes_per_level; -}; - -#endif // __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__ diff --git a/src/mem/ruby/system/System.hh b/src/mem/ruby/system/System.hh index 594f7e4f5..c909dc614 100644 --- a/src/mem/ruby/system/System.hh +++ b/src/mem/ruby/system/System.hh @@ -37,13 +37,13 @@ #include "base/callback.hh" #include "base/output.hh" -#include "mem/packet.hh" #include "mem/ruby/profiler/Profiler.hh" -#include "mem/ruby/recorder/CacheRecorder.hh" #include "mem/ruby/slicc_interface/AbstractController.hh" -#include "mem/ruby/system/MemoryControl.hh" -#include "mem/ruby/system/MemoryVector.hh" -#include "mem/ruby/system/SparseMemory.hh" +#include "mem/ruby/structures/MemoryControl.hh" +#include "mem/ruby/structures/MemoryVector.hh" +#include "mem/ruby/structures/SparseMemory.hh" +#include "mem/ruby/system/CacheRecorder.hh" +#include "mem/packet.hh" #include "params/RubySystem.hh" #include "sim/clocked_object.hh" diff --git a/src/mem/ruby/system/TBETable.hh b/src/mem/ruby/system/TBETable.hh deleted file mode 100644 index 018da6cbb..000000000 --- a/src/mem/ruby/system/TBETable.hh +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_TBETABLE_HH__ -#define __MEM_RUBY_SYSTEM_TBETABLE_HH__ - -#include - -#include "base/hashmap.hh" -#include "mem/ruby/common/Address.hh" - -template -class TBETable -{ - public: - TBETable(int number_of_TBEs) - : m_number_of_TBEs(number_of_TBEs) - { - } - - bool isPresent(const Address& address) const; - void allocate(const Address& address); - void deallocate(const Address& address); - bool - areNSlotsAvailable(int n) const - { - return (m_number_of_TBEs - m_map.size()) >= n; - } - - ENTRY* lookup(const Address& address); - - // Print cache contents - void print(std::ostream& out) const; - - private: - // Private copy constructor and assignment operator - TBETable(const TBETable& obj); - TBETable& operator=(const TBETable& obj); - - // Data Members (m_prefix) - m5::hash_map m_map; - - private: - int m_number_of_TBEs; -}; - -template -inline std::ostream& -operator<<(std::ostream& out, const TBETable& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -template -inline bool -TBETable::isPresent(const Address& address) const -{ - assert(address == line_address(address)); - assert(m_map.size() <= m_number_of_TBEs); - return !!m_map.count(address); -} - -template -inline void -TBETable::allocate(const Address& address) -{ - assert(!isPresent(address)); - assert(m_map.size() < m_number_of_TBEs); - m_map[address] = ENTRY(); -} - -template -inline void -TBETable::deallocate(const Address& address) -{ - assert(isPresent(address)); - assert(m_map.size() > 0); - m_map.erase(address); -} - -// looks an address up in the cache -template -inline ENTRY* -TBETable::lookup(const Address& address) -{ - if(m_map.find(address) != m_map.end()) return &(m_map.find(address)->second); - return NULL; -} - - -template -inline void -TBETable::print(std::ostream& out) const -{ -} - -#endif // __MEM_RUBY_SYSTEM_TBETABLE_HH__ diff --git a/src/mem/ruby/system/TimerTable.cc b/src/mem/ruby/system/TimerTable.cc deleted file mode 100644 index 38e26e5e9..000000000 --- 
a/src/mem/ruby/system/TimerTable.cc +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "mem/ruby/common/Global.hh" -#include "mem/ruby/system/System.hh" -#include "mem/ruby/system/TimerTable.hh" - -TimerTable::TimerTable() - : m_next_time(0) -{ - m_consumer_ptr = NULL; - m_clockobj_ptr = NULL; - - m_next_valid = false; - m_next_address = Address(0); -} - -bool -TimerTable::isReady() const -{ - if (m_map.empty()) - return false; - - if (!m_next_valid) { - updateNext(); - } - assert(m_next_valid); - return (m_clockobj_ptr->curCycle() >= m_next_time); -} - -const Address& -TimerTable::readyAddress() const -{ - assert(isReady()); - - if (!m_next_valid) { - updateNext(); - } - assert(m_next_valid); - return m_next_address; -} - -void -TimerTable::set(const Address& address, Cycles relative_latency) -{ - assert(address == line_address(address)); - assert(relative_latency > 0); - assert(!m_map.count(address)); - - Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency; - m_map[address] = ready_time; - assert(m_consumer_ptr != NULL); - m_consumer_ptr-> - scheduleEventAbsolute(m_clockobj_ptr->clockPeriod() * ready_time); - m_next_valid = false; - - // Don't always recalculate the next ready address - if (ready_time <= m_next_time) { - m_next_valid = false; - } -} - -void -TimerTable::unset(const Address& address) -{ - assert(address == line_address(address)); - assert(m_map.count(address)); - m_map.erase(address); - - // Don't always recalculate the next ready address - if (address == m_next_address) { - m_next_valid = false; - } -} - -void -TimerTable::print(std::ostream& out) const -{ -} - -void -TimerTable::updateNext() const -{ - if (m_map.empty()) { - assert(!m_next_valid); - return; - } - - AddressMap::const_iterator i = m_map.begin(); - AddressMap::const_iterator end = m_map.end(); - - m_next_address = i->first; - m_next_time = i->second; - ++i; - - for (; i != end; ++i) { - if (i->second < m_next_time) { - 
m_next_address = i->first; - m_next_time = i->second; - } - } - - m_next_valid = true; -} diff --git a/src/mem/ruby/system/TimerTable.hh b/src/mem/ruby/system/TimerTable.hh deleted file mode 100644 index b271d3e37..000000000 --- a/src/mem/ruby/system/TimerTable.hh +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __MEM_RUBY_SYSTEM_TIMERTABLE_HH__ -#define __MEM_RUBY_SYSTEM_TIMERTABLE_HH__ - -#include -#include -#include -#include - -#include "mem/ruby/common/Address.hh" -#include "mem/ruby/common/Consumer.hh" - -class TimerTable -{ - public: - TimerTable(); - - void - setConsumer(Consumer* consumer_ptr) - { - assert(m_consumer_ptr == NULL); - m_consumer_ptr = consumer_ptr; - } - - void setClockObj(ClockedObject* obj) - { - assert(m_clockobj_ptr == NULL); - m_clockobj_ptr = obj; - } - - void - setDescription(const std::string& name) - { - m_name = name; - } - - bool isReady() const; - const Address& readyAddress() const; - bool isSet(const Address& address) const { return !!m_map.count(address); } - void set(const Address& address, Cycles relative_latency); - void set(const Address& address, uint64_t relative_latency) - { set(address, Cycles(relative_latency)); } - - void unset(const Address& address); - void print(std::ostream& out) const; - - private: - void updateNext() const; - - // Private copy constructor and assignment operator - TimerTable(const TimerTable& obj); - TimerTable& operator=(const TimerTable& obj); - - // Data Members (m_prefix) - - // use a std::map for the address map as this container is sorted - // and ensures a well-defined iteration order - typedef std::map<Address, Cycles> AddressMap; - AddressMap m_map; - mutable bool m_next_valid; - mutable Cycles m_next_time; // Only valid if m_next_valid is true - mutable Address m_next_address; // Only valid if m_next_valid is true - - //! Object used for querying time. - ClockedObject* m_clockobj_ptr; - //!
Consumer to signal a wakeup() - Consumer* m_consumer_ptr; - - std::string m_name; -}; - -inline std::ostream& -operator<<(std::ostream& out, const TimerTable& obj) -{ - obj.print(out); - out << std::flush; - return out; -} - -#endif // __MEM_RUBY_SYSTEM_TIMERTABLE_HH__ diff --git a/src/mem/ruby/system/WireBuffer.cc b/src/mem/ruby/system/WireBuffer.cc deleted file mode 100644 index f45bd5678..000000000 --- a/src/mem/ruby/system/WireBuffer.cc +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2010 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Author: Lisa Hsu - * - */ - -#include -#include - -#include "base/cprintf.hh" -#include "base/stl_helpers.hh" -#include "mem/ruby/common/Global.hh" -#include "mem/ruby/system/System.hh" -#include "mem/ruby/system/WireBuffer.hh" - -using namespace std; - -// Output operator definition - -ostream& -operator<<(ostream& out, const WireBuffer& obj) -{ - obj.print(out); - out << flush; - return out; -} - - -// **************************************************************** - -// CONSTRUCTOR -WireBuffer::WireBuffer(const Params *p) - : SimObject(p) -{ - m_msg_counter = 0; -} - -void -WireBuffer::init() -{ -} - -WireBuffer::~WireBuffer() -{ -} - -void -WireBuffer::enqueue(MsgPtr message, Cycles latency) -{ - m_msg_counter++; - Cycles current_time = g_system_ptr->curCycle(); - Cycles arrival_time = current_time + latency; - assert(arrival_time > current_time); - - MessageBufferNode thisNode(arrival_time, m_msg_counter, message); - m_message_queue.push_back(thisNode); - if (m_consumer_ptr != NULL) { - m_consumer_ptr-> - scheduleEventAbsolute(g_system_ptr->clockPeriod() * arrival_time); - } else { - panic("No Consumer for WireBuffer! 
%s\n", *this); - } -} - -void -WireBuffer::dequeue() -{ - assert(isReady()); - pop_heap(m_message_queue.begin(), m_message_queue.end(), - greater()); - m_message_queue.pop_back(); -} - -const Message* -WireBuffer::peek() -{ - MessageBufferNode node = peekNode(); - Message* msg_ptr = node.m_msgptr.get(); - assert(msg_ptr != NULL); - return msg_ptr; -} - -MessageBufferNode -WireBuffer::peekNode() -{ - assert(isReady()); - MessageBufferNode req = m_message_queue.front(); - return req; -} - -void -WireBuffer::recycle() -{ - // Because you don't want anything reordered, make sure the recycle latency - // is just 1 cycle. As a result, you really want to use this only in - // Wire-like situations because you don't want to deadlock as a result of - // being stuck behind something if you're not actually supposed to. - assert(isReady()); - MessageBufferNode node = m_message_queue.front(); - pop_heap(m_message_queue.begin(), m_message_queue.end(), - greater()); - - node.m_time = g_system_ptr->curCycle() + Cycles(1); - m_message_queue.back() = node; - push_heap(m_message_queue.begin(), m_message_queue.end(), - greater()); - m_consumer_ptr-> - scheduleEventAbsolute(g_system_ptr->clockPeriod() * node.m_time); -} - -bool -WireBuffer::isReady() -{ - return ((!m_message_queue.empty()) && - (m_message_queue.front().m_time <= g_system_ptr->curCycle())); -} - -void -WireBuffer::print(ostream& out) const -{ -} - -void -WireBuffer::wakeup() -{ -} - -WireBuffer * -RubyWireBufferParams::create() -{ - return new WireBuffer(this); -} - diff --git a/src/mem/ruby/system/WireBuffer.hh b/src/mem/ruby/system/WireBuffer.hh deleted file mode 100644 index 9fb2d87a8..000000000 --- a/src/mem/ruby/system/WireBuffer.hh +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2010 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * Author: Lisa Hsu - * - */ - -#ifndef __MEM_RUBY_SYSTEM_WIREBUFFER_HH__ -#define __MEM_RUBY_SYSTEM_WIREBUFFER_HH__ - -#include -#include -#include - -#include "mem/ruby/buffers/MessageBufferNode.hh" -#include "mem/ruby/common/Consumer.hh" -#include "params/RubyWireBuffer.hh" -#include "sim/sim_object.hh" - -////////////////////////////////////////////////////////////////////////////// -// This object was written to literally mimic a Wire in Ruby, in the sense -// that there is no way for messages to get reordered en route on the WireBuffer. -// With Message Buffers, even if randomization is off and ordered is on, -// messages can arrive in different orders than they were sent because of -// network issues. This mimics a Wire, such that that is not possible. This can -// allow for messages between closely coupled controllers that are not actually -// separated by a network in real systems to simplify coherence. ///////////////////////////////////////////////////////////////////////////// - -class Message; - -class WireBuffer : public SimObject -{ - public: - typedef RubyWireBufferParams Params; - WireBuffer(const Params *p); - void init(); - - ~WireBuffer(); - - void wakeup(); - - void setConsumer(Consumer* consumer_ptr) - { - m_consumer_ptr = consumer_ptr; - } - Consumer* getConsumer() { return m_consumer_ptr; }; - void setDescription(const std::string& name) { m_description = name; }; - std::string getDescription() { return m_description; }; - - void enqueue(MsgPtr message, Cycles latency); - void dequeue(); - const Message* peek(); - MessageBufferNode peekNode(); - void recycle(); - bool isReady(); - bool areNSlotsAvailable(int n) { return true; }; // infinite queue length - - void print(std::ostream& out) const; - uint64_t m_msg_counter; - - private: - // Private copy constructor and assignment operator - WireBuffer (const WireBuffer& obj); - WireBuffer& operator=(const WireBuffer& obj); - - // data members - Consumer* m_consumer_ptr; // Consumer to signal a wakeup() - std::string m_description; - - // queues where memory requests live - std::vector<MessageBufferNode> m_message_queue; - -}; - -std::ostream& operator<<(std::ostream& out, const WireBuffer& obj); - -#endif // __MEM_RUBY_SYSTEM_WireBuffer_HH__ diff --git a/src/mem/ruby/system/WireBuffer.py b/src/mem/ruby/system/WireBuffer.py deleted file mode 100644 index f48ab1f95..000000000 --- a/src/mem/ruby/system/WireBuffer.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2010 Advanced Micro Devices, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer; -# redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution; -# neither the name of the copyright holders nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# Author: Lisa Hsu - -from m5.params import * -from m5.SimObject import SimObject - -class RubyWireBuffer(SimObject): - type = 'RubyWireBuffer' - cxx_class = 'WireBuffer' - cxx_header = "mem/ruby/system/WireBuffer.hh" diff --git a/src/mem/slicc/symbols/Type.py b/src/mem/slicc/symbols/Type.py index dc5448430..764173916 100644 --- a/src/mem/slicc/symbols/Type.py +++ b/src/mem/slicc/symbols/Type.py @@ -585,7 +585,7 @@ AccessPermission ${{self.c_ident}}_to_permission(const ${{self.c_ident}}& obj) for enum in self.enums.itervalues(): if enum.get("Primary"): code('#include "mem/protocol/${{enum.ident}}_Controller.hh"') - code('#include "mem/ruby/system/MachineID.hh"') + code('#include "mem/ruby/common/MachineID.hh"') code(''' // Code for output operator