* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "config/the_isa.hh"
+#if THE_ISA == X86_ISA
+#include "arch/x86/insts/microldstop.hh"
+#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/physical.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
assert(pkt->isRead());
type = RubyRequestType_Load_Linked;
}
+ } else if (pkt->req->isLocked()) {
+ if (pkt->isWrite()) {
+ DPRINTF(MemoryAccess, "Issuing Locked RMW Write\n");
+ type = RubyRequestType_Locked_RMW_Write;
+ } else {
+ DPRINTF(MemoryAccess, "Issuing Locked RMW Read\n");
+ assert(pkt->isRead());
+ type = RubyRequestType_Locked_RMW_Read;
+ }
} else {
if (pkt->isRead()) {
if (pkt->req->isInstFetch()) {
type = RubyRequestType_IFETCH;
} else {
- type = RubyRequestType_LD;
+#if THE_ISA == X86_ISA
+ uint32_t flags = pkt->req->getFlags();
+ bool storeCheck = flags &
+ (TheISA::StoreCheck << TheISA::FlagShift);
+#else
+ bool storeCheck = false;
+#endif // X86_ISA
+ if (storeCheck) {
+ type = RubyRequestType_RMW_Read;
+ } else {
+ type = RubyRequestType_LD;
+ }
}
} else if (pkt->isWrite()) {
+ //
+ // Note: M5 packets do not differentiate ST from RMW_Write
+ //
type = RubyRequestType_ST;
- } else if (pkt->isReadWrite()) {
- // Fix me. This conditional will never be executed
- // because isReadWrite() is just an OR of isRead() and
- // isWrite(). Furthermore, just because the packet is a
- // read/write request does not necessary mean it is a
- // read-modify-write atomic operation.
- type = RubyRequestType_RMW_Write;
} else {
panic("Unsupported ruby packet type\n");
}
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Load_Linked) ||
- (request->ruby_request.type == RubyRequestType_Store_Conditional)) {
+ (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
+ (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+ (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
pair<RequestTable::iterator, bool> r =
m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
bool success = r.second;
(ruby_request.type == RubyRequestType_RMW_Read) ||
(ruby_request.type == RubyRequestType_RMW_Write) ||
(ruby_request.type == RubyRequestType_Load_Linked) ||
- (ruby_request.type == RubyRequestType_Store_Conditional)) {
+ (ruby_request.type == RubyRequestType_Store_Conditional) ||
+ (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+ (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
m_writeRequestTable.erase(line_addr);
} else {
m_readRequestTable.erase(line_addr);
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Load_Linked) ||
- (request->ruby_request.type == RubyRequestType_Store_Conditional));
+ (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
+ (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+ (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));
//
// For Alpha, properly handle LL, SC, and write requests with respect to
// locked cache blocks.
//
bool success = handleLlsc(address, request);
- if (request->ruby_request.type == RubyRequestType_RMW_Read) {
+ if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
- } else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
+ } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
m_controller->unblock(address);
}
markRemoved();
assert((request->ruby_request.type == RubyRequestType_LD) ||
- (request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_IFETCH));
hitCallback(request, mach, data, true,
if ((type == RubyRequestType_LD) ||
(type == RubyRequestType_IFETCH) ||
(type == RubyRequestType_RMW_Read) ||
+ (type == RubyRequestType_Locked_RMW_Read) ||
(type == RubyRequestType_Load_Linked)) {
-
memcpy(ruby_request.data,
data.getData(request_address.getOffset(), ruby_request.len),
ruby_request.len);
ctype = CacheRequestType_LD;
break;
case RubyRequestType_ST:
+ case RubyRequestType_RMW_Read:
+ case RubyRequestType_RMW_Write:
+ //
+ // x86 locked instructions are translated to store cache coherence
+ // requests because these requests should always be treated as read
+ // exclusive operations and should leverage any migratory sharing
+ // optimization built into the protocol.
+ //
+ case RubyRequestType_Locked_RMW_Read:
+ case RubyRequestType_Locked_RMW_Write:
ctype = CacheRequestType_ST;
break;
+ //
+ // Alpha LL/SC instructions need to be handled carefully by the cache
+ // coherence protocol to ensure they follow the proper semantics. In
+ // particular, by identifying the operations as atomic, the protocol
+ // should understand that migratory sharing optimizations should not be
+ // performed (i.e. a load between the LL and SC should not steal away
+ // exclusive permission).
+ //
case RubyRequestType_Load_Linked:
case RubyRequestType_Store_Conditional:
ctype = CacheRequestType_ATOMIC;
break;
- case RubyRequestType_RMW_Read:
- ctype = CacheRequestType_ATOMIC;
- break;
- case RubyRequestType_RMW_Write:
- ctype = CacheRequestType_ATOMIC;
- break;
default:
assert(0);
}