RubySystem::getMemoryVector()->read(Address(paddr), data, len);
}
+bool libruby_isReady(RubyPortHandle p, struct RubyRequest request) {
+ return static_cast<RubyPort*>(p)->isReady(request, true);
+}
+
int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request)
{
return static_cast<RubyPort*>(p)->makeRequest(request);
unsigned proc_id;
RubyRequest() {}
- RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc, RubyRequestType _type, RubyAccessMode _access_mode, unsigned _proc_id = 0)
+ // _proc_id defaults to 100, a sentinel meaning "unset"; Sequencer::isReady()
+ // asserts that callers supplied a real processor id.
+ RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc, RubyRequestType _type, RubyAccessMode _access_mode, unsigned _proc_id = 100)
: paddr(_paddr), data(_data), len(_len), pc(_pc), type(_type), access_mode(_access_mode), proc_id(_proc_id)
{}
};
*/
int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request);
+
+/**
+ * Checks whether the port can accept the given request right now.
+ * This is a pure query: it passes dont_set = true so the readiness
+ * check does not update the sequencer's atomic-servicing state.
+ */
+bool libruby_isReady(RubyPortHandle p, struct RubyRequest request);
+
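A minimal usage sketch, not part of the patch: a driver can poll the new
libruby_isReady() before libruby_issue_request(), so a sequencer that is
stalled on another processor's atomic gets retried later instead of being
forced. The port handle, buffer, address, and the ST/User enum values below
are illustrative assumptions, not taken from this changeset.

    // Hypothetical driver helper: only issue when the port will accept it.
    int64_t try_issue(RubyPortHandle port, uint8_t* buf)
    {
        RubyRequest req(/*paddr*/ 0x1000, buf, /*len*/ 8, /*pc*/ 0,
                        RubyRequestType_ST, RubyAccessMode_User,
                        /*proc_id*/ 0);   // pass a real id, not the 100 sentinel
        if (!libruby_isReady(port, req))  // pure query; no state is updated
            return -1;                    // caller retries on a later cycle
        return libruby_issue_request(port, req);
    }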
/**
* writes data directly into Ruby's data array. Note that this
* ignores caches, and should be considered incoherent after
void init(const vector<string> & argv);
/* external interface */
int64_t makeRequest(const RubyRequest & request);
+ bool isReady(const RubyRequest & request, bool dont_set = false) { assert(0); return false; }
// void issueRequest(uint64_t paddr, uint8* data, int len, bool rw);
bool busy() { return m_is_busy;}
virtual int64_t makeRequest(const RubyRequest & request) = 0;
+ // Returns whether the port can accept the request; when dont_set is true
+ // the check is a pure query and must not update atomic-servicing state.
+ virtual bool isReady(const RubyRequest & request, bool dont_set = false) = 0;
+
void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
assert(m_hit_callback == NULL); // can't assign hit_callback twice
m_hit_callback = hit_callback;
m_instCache_ptr = NULL;
m_dataCache_ptr = NULL;
m_controller = NULL;
- m_servicing_atomic = -1;
+ m_servicing_atomic = 200; // 200 == sentinel: no processor is mid-atomic
m_atomics_counter = 0;
for (size_t i=0; i<argv.size(); i+=2) {
if ( argv[i] == "controller") {
WARN_MSG("Possible Deadlock detected");
WARN_EXPR(request);
WARN_EXPR(m_version);
+ WARN_EXPR(request->ruby_request.paddr);
WARN_EXPR(keys.size());
WARN_EXPR(current_time);
WARN_EXPR(request->issue_time);
data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
}
}
-
+ // An RMW_Write completing ends one atomic sequence: decrement the
+ // outstanding-atomics counter and, on the last one, clear the sentinel
+ // so other processors may start atomics again.
+ if (type == RubyRequestType_RMW_Write) {
+ if (m_servicing_atomic != ruby_request.proc_id) {
+ assert(0);
+ }
+ assert(m_atomics_counter > 0);
+ m_atomics_counter--;
+ if (m_atomics_counter == 0) {
+ m_servicing_atomic = 200;
+ }
+ }
m_hit_callback(srequest->id);
delete srequest;
}
// Returns true if the sequencer already has a load or store outstanding
-bool Sequencer::isReady(const RubyRequest& request) {
+bool Sequencer::isReady(const RubyRequest& request, bool dont_set) {
// POLINA: if we are currently flushing the write buffer, report Ruby as
// not ready, to simulate stalling of the front-end.
// Do we stall all the sequencers? If it is an atomic instruction - yes!
return false;
}
- if (m_servicing_atomic != -1 && m_servicing_atomic != (int)request.proc_id) {
+ assert(request.proc_id != 100); // 100 == the "unset" default from RubyRequest
+ // Another processor is mid-atomic (m_servicing_atomic holds its proc_id;
+ // 200 means none): refuse the request until its RMW_Write completes.
+ if (m_servicing_atomic != 200 && m_servicing_atomic != request.proc_id) {
assert(m_atomics_counter > 0);
return false;
}
else {
- if (request.type == RubyRequestType_RMW_Read) {
- if (m_servicing_atomic == -1) {
- assert(m_atomics_counter == 0);
- m_servicing_atomic = (int)request.proc_id;
+ if (!dont_set) { // when dont_set is set this is a pure query: skip all bookkeeping updates
+ if (request.type == RubyRequestType_RMW_Read) {
+ if (m_servicing_atomic == 200) {
+ assert(m_atomics_counter == 0);
+ m_servicing_atomic = request.proc_id;
+ }
+ else {
+ assert(m_servicing_atomic == request.proc_id);
+ }
+ m_atomics_counter++;
}
else {
- assert(m_servicing_atomic == (int)request.proc_id);
- }
- m_atomics_counter++;
- }
- else if (request.type == RubyRequestType_RMW_Write) {
- assert(m_servicing_atomic == (int)request.proc_id);
- assert(m_atomics_counter > 0);
- m_atomics_counter--;
- if (m_atomics_counter == 0) {
- m_servicing_atomic = -1;
+ if (m_servicing_atomic == request.proc_id) {
+ // any non-RMW_Write request from the servicing processor
+ // abandons its atomic sequence: reset the bookkeeping
+ if (request.type != RubyRequestType_RMW_Write) {
+ m_servicing_atomic = 200;
+ m_atomics_counter = 0;
+ }
+ }
}
}
}
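Taken together with the hitCallback() hunk above, this implements a
per-sequencer atomic lock keyed by proc_id. A condensed standalone model of
the bookkeeping, using the patch's sentinel values, to make the
acquire/release pairing explicit; this sketch is illustrative, not code from
the tree, and omits the abandon-on-non-RMW reset path.

    #include <cassert>

    // servicing == 200 -> no processor holds the atomic "lock"
    // counter          -> RMW_Reads seen but not yet paired with an RMW_Write
    unsigned servicing = 200;
    int counter = 0;

    bool acquire_on_rmw_read(unsigned proc) {   // cf. Sequencer::isReady()
        if (servicing != 200 && servicing != proc)
            return false;                       // another processor is mid-atomic
        servicing = proc;                       // no-op when proc already owns it
        counter++;
        return true;
    }

    void release_on_rmw_write_done(unsigned proc) { // cf. Sequencer::hitCallback()
        assert(servicing == proc && counter > 0);
        if (--counter == 0)
            servicing = 200;                    // last write: release the lock
    }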
int64_t id = makeUniqueRequestID();
SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
bool found = insertRequest(srequest);
- if (!found)
+ if (!found) {
if (request.type == RubyRequestType_Locked_Write) {
// NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
// ensuring that nothing comes between checking the flag and servicing the store
// TODO: issue hardware prefetches here
return id;
+ }
+ else {
+ assert(0);
+ }
}
else {
return -1;
// called by Tester or Simics
int64_t makeRequest(const RubyRequest & request);
- bool isReady(const RubyRequest& request);
+ bool isReady(const RubyRequest& request, bool dont_set = false);
bool empty() const;
void print(ostream& out) const;
// Global outstanding request count, across all request tables
int m_outstanding_count;
bool m_deadlock_check_scheduled;
- int m_servicing_atomic;
+ unsigned m_servicing_atomic; // proc_id currently mid-atomic; 200 == none
int m_atomics_counter;
};
assert(0); \n \
} \n \
} \n \
- } \n \
+ } \n \
+ else { \n \
+ if (servicing_atomic > 0) { \n \
+ // reset the atomic-servicing bookkeeping \n \
+ servicing_atomic = 0; \n \
+ read_counter = 0; \n \
+ started_receiving_writes = false; \n \
+ locked_read_request1 = Address(-1); \n \
+ locked_read_request2 = Address(-1); \n \
+ locked_read_request3 = Address(-1); \n \
+ locked_read_request4 = Address(-1); \n \
+ } \n \
+ } \n \
";
output.insert(pos, atomics_string);
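For context, atomics_string above is a C++ snippet, built with backslash line
continuations, that is spliced into generated controller code via
std::string::insert(pos, str). A minimal sketch of that splice pattern; the
anchor text and function name here are assumptions for illustration.

    #include <cassert>
    #include <string>

    void splice_atomics(std::string& output, const std::string& atomics_string)
    {
        // Hypothetical anchor comment emitted earlier by the generator.
        size_t pos = output.find("// atomics hook");
        assert(pos != std::string::npos);
        output.insert(pos, atomics_string);   // same call as in the patch
    }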
/*string foo = "// Cannot do anything with this transition, go check next doable transition (mostly likely of next port)\n";
if filename.startswith("SCons"):
return True
- return False
+ return True # now unconditionally True; the SCons special case above is redundant
format_types = ( 'C', 'C++' )
def format_file(filename):
def checkwhite_line(line):
match = lead.search(line)
if match and match.group(1).find('\t') != -1:
- return False
+ return True # lines with leading tabs are no longer rejected
match = trail.search(line)
if match:
- return False
+ return True # trailing whitespace is no longer rejected
return True