de = readdir(subdir);
}
+
+ closedir(subdir);
}
// try to force recognition that we deleted the files in the directory
struct sockaddr_in sockaddr;
sockaddr.sin_family = PF_INET;
sockaddr.sin_addr.s_addr = INADDR_ANY;
-
sockaddr.sin_port = htons(port);
+ // finally clear sin_zero
+ memset(&sockaddr.sin_zero, 0, sizeof(sockaddr.sin_zero));
int ret = ::bind(fd, (struct sockaddr *)&sockaddr, sizeof (sockaddr));
if (ret != 0) {
if (ret == -1 && errno != EADDRINUSE)
int sfd = ::accept(fd, (struct sockaddr *)&sockaddr, &slen);
if (sfd != -1 && nodelay) {
int i = 1;
- ::setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (char *)&i, sizeof(i));
+ if (::setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (char *)&i,
+ sizeof(i)) < 0)
+ warn("ListenSocket(accept): setsockopt() TCP_NODELAY failed!");
}
return sfd;
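Editor's note on the hunk above: zeroing the whole sockaddr_in up front covers sin_zero (and any other padding) in one step, and setsockopt() failures are worth reporting as the patch now does. A minimal self-contained sketch of both points follows; makeBoundFd and setNoDelay are hypothetical helpers, not part of the patch:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>
    #include <unistd.h>

    // Sketch only: zero the entire struct so no field, including the
    // sin_zero padding, reaches bind() uninitialized.
    static int
    makeBoundFd(uint16_t port)
    {
        int fd = ::socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0)
            return -1;

        struct sockaddr_in addr;
        std::memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = INADDR_ANY;
        addr.sin_port = htons(port);

        if (::bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
            ::close(fd);
            return -1;
        }
        return fd;
    }

    // As in the patch, report rather than ignore a failed setsockopt
    // on the accepted descriptor; the connection still works, just
    // without TCP_NODELAY.
    static void
    setNoDelay(int sfd)
    {
        int one = 1;
        if (::setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, &one,
                         sizeof(one)) < 0)
            std::perror("setsockopt(TCP_NODELAY)");
    }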
RubyTester::RubyTester(const Params *p)
: MemObject(p), checkStartEvent(this),
_masterId(p->system->getMasterId(name())),
+ m_checkTable_ptr(nullptr),
m_num_cpus(p->num_cpus),
m_checks_to_complete(p->checks_to_complete),
m_deadlock_threshold(p->deadlock_threshold),
+ m_num_writers(0),
+ m_num_readers(0),
m_wakeup_frequency(p->wakeup_frequency),
m_check_flush(p->check_flush),
m_num_inst_ports(p->port_cpuInstPort_connection_count)
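The added initializers above guard against reading indeterminate member values before first assignment. Since gem5 builds as C++11, in-class default initializers are an equivalent way to express this once for all constructors; a sketch, with CheckTable as a stand-in rather than gem5's class:

    // Sketch only: the defaults become part of the declaration, so
    // every constructor inherits them without repeating the list.
    struct CheckTable;

    class TesterSketch
    {
        CheckTable *m_checkTable_ptr = nullptr; // no stale pointer reads
        int m_num_writers = 0;
        int m_num_readers = 0;
    };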
if (pkt->cmd == MemCmd::WriteInvalidateReq) {
memSidePort->sendAtomic(pkt); // complete writeback
if (isTopLevel) {
+ // @todo Static analysis suggests blk can actually be NULL here
+ assert(blk);
+
// top level caches allocate and write the data
assert(blk->isDirty());
assert(!blk->isWritable());
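Worth noting for the added assert: assert() compiles away when NDEBUG is defined (as it is in optimized gem5.fast builds), so it documents the invariant without guarding an optimized binary. A sketch of the difference, with useBlock as an illustrative stand-in:

    #include <cassert>
    #include <cstdio>
    #include <cstdlib>

    // Sketch only: the assert disappears under NDEBUG; the explicit
    // check fails loudly in every build type.
    static void
    useBlock(int *blk)
    {
        assert(blk);            // active only in debug builds

        if (!blk) {             // always compiled in
            std::fprintf(stderr, "useBlock: blk is NULL\n");
            std::abort();
        }
        *blk = 1;
    }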
name(), samplePeriodTicks, samplePeriod.msec());
}
+CommMonitor::~CommMonitor()
+{
+ // if not already done, close the streams
+ closeStreams();
+}
+
void
CommMonitor::closeStreams()
{
CommMonitor(Params* params);
/** Destructor */
- ~CommMonitor() {}
+ ~CommMonitor();
/**
* Callback to flush and close all open output streams on exit. If
*/
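The new destructor gives the monitor a second path into closeStreams() besides the exit callback, so the close logic must tolerate being invoked twice. A minimal sketch of that idempotent-close pattern; the Stream member and names are illustrative, not CommMonitor's actual internals:

    #include <fstream>
    #include <memory>

    // Sketch only: both the destructor and an explicit shutdown hook
    // may call closeStreams(), so it checks before closing.
    class MonitorSketch
    {
      public:
        ~MonitorSketch() { closeStreams(); }

        void closeStreams()
        {
            // Safe to call more than once: the pointer is cleared
            // the first time through.
            if (traceStream) {
                traceStream->flush();
                traceStream.reset();
            }
        }

      private:
        std::unique_ptr<std::ofstream> traceStream;
    };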
Packet(Request *_req, MemCmd _cmd)
: cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
- src(InvalidPortID), dest(InvalidPortID),
+ size(0), src(InvalidPortID), dest(InvalidPortID),
bytesValidStart(0), bytesValidEnd(0),
firstWordDelay(0), lastWordDelay(0),
senderState(NULL)
// write memory file
string filepath = Checkpoint::dir() + "/" + filename.c_str();
- int fd = creat(filepath.c_str(), 0664);
- if (fd < 0) {
- perror("creat");
- fatal("Can't open physical memory checkpoint file '%s'\n",
- filename);
- }
-
- gzFile compressed_mem = gzdopen(fd, "wb");
+ gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
if (compressed_mem == NULL)
- fatal("Insufficient memory to allocate compression state for %s\n",
+ fatal("Can't open physical memory checkpoint file '%s'\n",
filename);
uint64_t pass_size = 0;
string filepath = cp->cptDir + "/" + filename;
// open the compressed memory file
- int fd = open(filepath.c_str(), O_RDONLY);
- if (fd < 0) {
- perror("open");
- fatal("Can't open physical memory checkpoint file '%s'", filename);
- }
-
- gzFile compressed_mem = gzdopen(fd, "rb");
+ gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
if (compressed_mem == NULL)
- fatal("Insufficient memory to allocate compression state for %s\n",
- filename);
+ fatal("Can't open physical memory checkpoint file '%s'", filename);
// we've already got the actual backing store mapped
uint8_t* pmem = backingStore[store_id].second;
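gzopen() opens the file and sets up the compression state in one call, so a failure no longer leaks the file descriptor that the old creat()/gzdopen() split could leave behind when gzdopen failed. A hedged sketch of the resulting write/read pattern using only standard zlib calls; path and buffer are placeholders:

    #include <zlib.h>
    #include <cstdint>

    // Sketch only: write a buffer through zlib and read it back,
    // checking every call. If gzopen() fails there is nothing to
    // clean up, unlike a dangling fd from a creat()/gzdopen() pair.
    static bool
    writeCompressed(const char *path, const uint8_t *buf, unsigned len)
    {
        gzFile f = gzopen(path, "wb");
        if (f == NULL)
            return false;
        bool ok = gzwrite(f, buf, len) == (int)len;
        return gzclose(f) == Z_OK && ok;
    }

    static bool
    readCompressed(const char *path, uint8_t *buf, unsigned len)
    {
        gzFile f = gzopen(path, "rb");
        if (f == NULL)
            return false;
        bool ok = gzread(f, buf, len) == (int)len;
        return gzclose(f) == Z_OK && ok;
    }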
AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
for (auto it = l.begin(); it != l.end(); ++it) {
if (it->contains(pkt->getAddr())) {
- ruby_port->master_ports[i]->sendTimingReq(pkt);
+ // generally it is not safe to assume success here as
+ // the port could be blocked
+ bool M5_VAR_USED success =
+ ruby_port->master_ports[i]->sendTimingReq(pkt);
+ assert(success);
return true;
}
}
}
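The assert above records the assumption that these ports can always sink a request; in the general case a false return from sendTimingReq() means the packet must be buffered and re-sent later. An illustrative, self-contained sketch of that buffer-and-retry discipline; ToySender and its names are hypothetical, not gem5's port API:

    #include <deque>
    #include <functional>

    // Sketch only: a sender that never assumes the receiver accepted
    // a packet. Rejects are buffered and re-sent when the receiver
    // signals (via recvRetry) that it can make progress again.
    struct Pkt { int id; };

    class ToySender
    {
      public:
        explicit ToySender(std::function<bool(const Pkt &)> try_send)
            : trySend(std::move(try_send)) {}

        void send(const Pkt &p)
        {
            // Queue behind earlier rejects to preserve ordering.
            if (!blocked.empty() || !trySend(p))
                blocked.push_back(p);
        }

        // Called by the receiver when it has room again, analogous
        // to a port retry.
        void recvRetry()
        {
            while (!blocked.empty() && trySend(blocked.front()))
                blocked.pop_front();
        }

      private:
        std::function<bool(const Pkt &)> trySend;
        std::deque<Pkt> blocked;
    };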
EventQueue::EventQueue(const string &n)
- : objName(n), head(NULL), _curTick(0),
- async_queue_mutex(new std::mutex())
+ : objName(n), head(NULL), _curTick(0)
{
}
void
EventQueue::asyncInsert(Event *event)
{
- async_queue_mutex->lock();
+ async_queue_mutex.lock();
async_queue.push_back(event);
- async_queue_mutex->unlock();
+ async_queue_mutex.unlock();
}
void
EventQueue::handleAsyncInsertions()
{
assert(this == curEventQueue());
- async_queue_mutex->lock();
+ async_queue_mutex.lock();
while (!async_queue.empty()) {
insert(async_queue.front());
async_queue.pop_front();
}
- async_queue_mutex->unlock();
+ async_queue_mutex.unlock();
}
#include <cassert>
#include <climits>
#include <iosfwd>
+#include <memory>
#include <mutex>
#include <string>
Tick _curTick;
//! Mutex to protect async queue.
- std::mutex *async_queue_mutex;
+ std::mutex async_queue_mutex;
//! List of events added by other threads to this event queue.
std::list<Event*> async_queue;