ruby: eliminate non-determinism from ruby.stats output
[gem5.git] / src / mem / ruby / profiler / Profiler.cc
index e9c2aed2985630fec1bdf77eadcd987ca0c02948..d0ea7921c224f05abcbc2dbfb6ebd46452063ab3 100644 (file)
@@ -42,9 +42,6 @@
    ----------------------------------------------------------------------
 */
 
-// Allows use of times() library call, which determines virtual runtime
-#include <sys/resource.h>
-#include <sys/times.h>
 #include <sys/types.h>
 #include <unistd.h>
 
 using namespace std;
 using m5::stl_helpers::operator<<;
 
-static double process_memory_total();
-static double process_memory_resident();
-
 Profiler::Profiler(const Params *p)
-    : SimObject(p), m_event(this)
+    : SimObject(p)
 {
     m_inst_profiler_ptr = NULL;
     m_address_profiler_ptr = NULL;
-
     m_real_time_start_time = time(NULL); // Not reset in clearStats()
-    m_stats_period = 1000000; // Default
-    m_periodic_output_file_ptr = &cerr;
 
     m_hot_lines = p->hot_lines;
     m_all_instructions = p->all_instructions;
@@ -100,69 +91,6 @@ Profiler::Profiler(const Params *p)
 
 Profiler::~Profiler()
 {
-    if (m_periodic_output_file_ptr != &cerr) {
-        delete m_periodic_output_file_ptr;
-    }
-}
-
-void
-Profiler::wakeup()
-{
-    // FIXME - avoid the repeated code
-
-    vector<int64_t> perProcCycleCount(m_num_of_sequencers);
-
-    for (int i = 0; i < m_num_of_sequencers; i++) {
-        perProcCycleCount[i] =
-            g_system_ptr->curCycle() - m_cycles_executed_at_start[i] + 1;
-        // The +1 allows us to avoid division by zero
-    }
-
-    ostream &out = *m_periodic_output_file_ptr;
-
-    out << "ruby_cycles: " << g_system_ptr->curCycle()-m_ruby_start << endl
-        << "mbytes_resident: " << process_memory_resident() << endl
-        << "mbytes_total: " << process_memory_total() << endl;
-
-    if (process_memory_total() > 0) {
-        out << "resident_ratio: "
-            << process_memory_resident() / process_memory_total() << endl;
-    }
-
-    out << "miss_latency: " << m_allMissLatencyHistogram << endl;
-
-    out << endl;
-
-    if (m_all_instructions) {
-        m_inst_profiler_ptr->printStats(out);
-    }
-
-    //g_system_ptr->getNetwork()->printStats(out);
-    schedule(m_event, g_system_ptr->clockEdge(Cycles(m_stats_period)));
-}
-
-void
-Profiler::setPeriodicStatsFile(const string& filename)
-{
-    cout << "Recording periodic statistics to file '" << filename << "' every "
-         << m_stats_period << " Ruby cycles" << endl;
-
-    if (m_periodic_output_file_ptr != &cerr) {
-        delete m_periodic_output_file_ptr;
-    }
-
-    m_periodic_output_file_ptr = new ofstream(filename.c_str());
-    schedule(m_event, g_system_ptr->clockEdge(Cycles(1)));
-}
-
-void
-Profiler::setPeriodicStatsInterval(int64_t period)
-{
-    cout << "Recording periodic statistics every " << m_stats_period
-         << " Ruby cycles" << endl;
-
-    m_stats_period = period;
-    schedule(m_event, g_system_ptr->clockEdge(Cycles(1)));
 }
 
 void
@@ -279,68 +207,183 @@ Profiler::printOutstandingReqProfile(ostream &out) const
 }
 
 void
-Profiler::printStats(ostream& out, bool short_stats)
+Profiler::printMissLatencyProfile(ostream &out) const
 {
-    out << endl;
-    if (short_stats) {
-        out << "SHORT ";
+    // Collate the miss latency histograms from all the sequencers
+    Histogram latency_hist;
+    std::vector<Histogram> type_latency_hist(RubyRequestType_NUM);
+
+    Histogram hit_latency_hist;
+    std::vector<Histogram> hit_type_latency_hist(RubyRequestType_NUM);
+
+    std::vector<Histogram> hit_mach_latency_hist(MachineType_NUM);
+    std::vector<std::vector<Histogram> >
+        hit_type_mach_latency_hist(RubyRequestType_NUM,
+                               std::vector<Histogram>(MachineType_NUM));
+
+    Histogram miss_latency_hist;
+    std::vector<Histogram> miss_type_latency_hist(RubyRequestType_NUM);
+
+    std::vector<Histogram> miss_mach_latency_hist(MachineType_NUM);
+    std::vector<std::vector<Histogram> >
+        miss_type_mach_latency_hist(RubyRequestType_NUM,
+                               std::vector<Histogram>(MachineType_NUM));
+
+    std::vector<Histogram> issue_to_initial_delay_hist(MachineType_NUM);
+    std::vector<Histogram> initial_to_forward_delay_hist(MachineType_NUM);
+    std::vector<Histogram>
+        forward_to_first_response_delay_hist(MachineType_NUM);
+    std::vector<Histogram>
+        first_response_to_completion_delay_hist(MachineType_NUM);
+    std::vector<uint64_t> incomplete_times(MachineType_NUM);
+
+    for (uint32_t i = 0; i < MachineType_NUM; i++) {
+        for (map<uint32_t, AbstractController*>::iterator it =
+                  g_abs_controls[i].begin();
+             it != g_abs_controls[i].end(); ++it) {
+
+            AbstractController *ctr = (*it).second;
+            Sequencer *seq = ctr->getSequencer();
+            if (seq != NULL) {
+                // add all the latencies
+                latency_hist.add(seq->getLatencyHist());
+                hit_latency_hist.add(seq->getHitLatencyHist());
+                miss_latency_hist.add(seq->getMissLatencyHist());
+
+                // add the per request type latencies
+                for (uint32_t j = 0; j < RubyRequestType_NUM; ++j) {
+                    type_latency_hist[j]
+                        .add(seq->getTypeLatencyHist(j));
+                    hit_type_latency_hist[j]
+                        .add(seq->getHitTypeLatencyHist(j));
+                    miss_type_latency_hist[j]
+                        .add(seq->getMissTypeLatencyHist(j));
+                }
+
+                // add the per machine type miss latencies
+                for (uint32_t j = 0; j < MachineType_NUM; ++j) {
+                    hit_mach_latency_hist[j]
+                        .add(seq->getHitMachLatencyHist(j));
+                    miss_mach_latency_hist[j]
+                        .add(seq->getMissMachLatencyHist(j));
+
+                    issue_to_initial_delay_hist[j].add(
+                        seq->getIssueToInitialDelayHist(MachineType(j)));
+
+                    initial_to_forward_delay_hist[j].add(
+                        seq->getInitialToForwardDelayHist(MachineType(j)));
+                    forward_to_first_response_delay_hist[j].add(seq->
+                        getForwardRequestToFirstResponseHist(MachineType(j)));
+
+                    first_response_to_completion_delay_hist[j].add(seq->
+                        getFirstResponseToCompletionDelayHist(MachineType(j)));
+                    incomplete_times[j] +=
+                        seq->getIncompleteTimes(MachineType(j));
+                }
+
+                // add the per (request, machine) type miss latencies
+                for (uint32_t j = 0; j < RubyRequestType_NUM; j++) {
+                    for (uint32_t k = 0; k < MachineType_NUM; k++) {
+                        hit_type_mach_latency_hist[j][k].add(
+                            seq->getHitTypeMachLatencyHist(j,k));
+                        miss_type_mach_latency_hist[j][k].add(
+                            seq->getMissTypeMachLatencyHist(j,k));
+                    }
+                }
+            }
+        }
     }
-    out << "Profiler Stats" << endl;
-    out << "--------------" << endl;
 
-    time_t real_time_current = time(NULL);
-    double seconds = difftime(real_time_current, m_real_time_start_time);
-    double minutes = seconds / 60.0;
-    double hours = minutes / 60.0;
-    double days = hours / 24.0;
-    Cycles ruby_cycles = g_system_ptr->curCycle()-m_ruby_start;
+    out << "latency: " << latency_hist << endl;
+    for (int i = 0; i < RubyRequestType_NUM; i++) {
+        if (type_latency_hist[i].size() > 0) {
+            out << "latency: " << RubyRequestType(i) << ": "
+                << type_latency_hist[i] << endl;
+        }
+    }
 
-    if (!short_stats) {
-        out << "Elapsed_time_in_seconds: " << seconds << endl;
-        out << "Elapsed_time_in_minutes: " << minutes << endl;
-        out << "Elapsed_time_in_hours: " << hours << endl;
-        out << "Elapsed_time_in_days: " << days << endl;
-        out << endl;
+    out << "hit latency: " << hit_latency_hist << endl;
+    for (int i = 0; i < RubyRequestType_NUM; i++) {
+        if (hit_type_latency_hist[i].size() > 0) {
+            out << "hit latency: " << RubyRequestType(i) << ": "
+                << hit_type_latency_hist[i] << endl;
+        }
     }
 
-    // print the virtual runtimes as well
-    struct tms vtime;
-    times(&vtime);
-    seconds = (vtime.tms_utime + vtime.tms_stime) / 100.0;
-    minutes = seconds / 60.0;
-    hours = minutes / 60.0;
-    days = hours / 24.0;
-    out << "Virtual_time_in_seconds: " << seconds << endl;
-    out << "Virtual_time_in_minutes: " << minutes << endl;
-    out << "Virtual_time_in_hours:   " << hours << endl;
-    out << "Virtual_time_in_days:    " << days << endl;
-    out << endl;
+    for (int i = 0; i < MachineType_NUM; i++) {
+        if (hit_mach_latency_hist[i].size() > 0) {
+            out << "hit latency: " << MachineType(i) << ": "
+                << hit_mach_latency_hist[i] << endl;
+        }
+    }
 
-    out << "Ruby_current_time: " << g_system_ptr->curCycle() << endl;
-    out << "Ruby_start_time: " << m_ruby_start << endl;
-    out << "Ruby_cycles: " << ruby_cycles << endl;
-    out << endl;
+    for (int i = 0; i < RubyRequestType_NUM; i++) {
+        for (int j = 0; j < MachineType_NUM; j++) {
+            if (hit_type_mach_latency_hist[i][j].size() > 0) {
+                out << "hit latency: " << RubyRequestType(i)
+                    << ": " << MachineType(j) << ": "
+                    << hit_type_mach_latency_hist[i][j] << endl;
+            }
+        }
+    }
 
-    if (!short_stats) {
-        out << "mbytes_resident: " << process_memory_resident() << endl;
-        out << "mbytes_total: " << process_memory_total() << endl;
-        if (process_memory_total() > 0) {
-            out << "resident_ratio: "
-                << process_memory_resident()/process_memory_total() << endl;
+    out << "miss latency: " << miss_latency_hist << endl;
+    for (int i = 0; i < RubyRequestType_NUM; i++) {
+        if (miss_type_latency_hist[i].size() > 0) {
+            out << "miss latency: " << RubyRequestType(i) << ": "
+                << miss_type_latency_hist[i] << endl;
         }
-        out << endl;
     }
 
-    vector<int64_t> perProcCycleCount(m_num_of_sequencers);
+    for (int i = 0; i < MachineType_NUM; i++) {
+        if (miss_mach_latency_hist[i].size() > 0) {
+            out << "miss latency: " << MachineType(i) << ": "
+                << miss_mach_latency_hist[i] << endl;
+
+            out << "miss latency: " << MachineType(i)
+                << "::issue_to_initial_request: "
+                << issue_to_initial_delay_hist[i] << endl;
+            out << "miss latency: " << MachineType(i)
+                << "::initial_to_forward_request: "
+                << initial_to_forward_delay_hist[i] << endl;
+            out << "miss latency: " << MachineType(i)
+                << "::forward_to_first_response: "
+                << forward_to_first_response_delay_hist[i] << endl;
+            out << "miss latency: " << MachineType(i)
+                << "::first_response_to_completion: "
+                << first_response_to_completion_delay_hist[i] << endl;
+            out << "incomplete times: " << incomplete_times[i] << endl;
+        }
+    }
 
-    for (int i = 0; i < m_num_of_sequencers; i++) {
-        perProcCycleCount[i] =
-            g_system_ptr->curCycle() - m_cycles_executed_at_start[i] + 1;
-        // The +1 allows us to avoid division by zero
+    for (int i = 0; i < RubyRequestType_NUM; i++) {
+        for (int j = 0; j < MachineType_NUM; j++) {
+            if (miss_type_mach_latency_hist[i][j].size() > 0) {
+                out << "miss latency: " << RubyRequestType(i)
+                    << ": " << MachineType(j) << ": "
+                    << miss_type_mach_latency_hist[i][j] << endl;
+            }
+        }
     }
 
-    out << "ruby_cycles_executed: " << perProcCycleCount << endl;
+    out << endl;
+}
 
+void
+Profiler::printStats(ostream& out, bool short_stats)
+{
+    out << endl;
+    if (short_stats) {
+        out << "SHORT ";
+    }
+    out << "Profiler Stats" << endl;
+    out << "--------------" << endl;
+
+    Cycles ruby_cycles = g_system_ptr->curCycle()-m_ruby_start;
+
+    out << "Ruby_current_time: " << g_system_ptr->curCycle() << endl;
+    out << "Ruby_start_time: " << m_ruby_start << endl;
+    out << "Ruby_cycles: " << ruby_cycles << endl;
     out << endl;
 
     if (!short_stats) {
@@ -374,68 +417,7 @@ Profiler::printStats(ostream& out, bool short_stats)
     if (!short_stats) {
         out << "All Non-Zero Cycle Demand Cache Accesses" << endl;
         out << "----------------------------------------" << endl;
-        out << "miss_latency: " << m_allMissLatencyHistogram << endl;
-        for (int i = 0; i < m_missLatencyHistograms.size(); i++) {
-            if (m_missLatencyHistograms[i].size() > 0) {
-                out << "miss_latency_" << RubyRequestType(i) << ": "
-                    << m_missLatencyHistograms[i] << endl;
-            }
-        }
-        for (int i = 0; i < m_machLatencyHistograms.size(); i++) {
-            if (m_machLatencyHistograms[i].size() > 0) {
-                out << "miss_latency_" << GenericMachineType(i) << ": "
-                    << m_machLatencyHistograms[i] << endl;
-            }
-        }
-
-        out << "miss_latency_wCC_issue_to_initial_request: " 
-            << m_wCCIssueToInitialRequestHistogram << endl;
-        out << "miss_latency_wCC_initial_forward_request: " 
-            << m_wCCInitialRequestToForwardRequestHistogram << endl;
-        out << "miss_latency_wCC_forward_to_first_response: " 
-            << m_wCCForwardRequestToFirstResponseHistogram << endl;
-        out << "miss_latency_wCC_first_response_to_completion: " 
-            << m_wCCFirstResponseToCompleteHistogram << endl;
-        out << "imcomplete_wCC_Times: " << m_wCCIncompleteTimes << endl;
-        out << "miss_latency_dir_issue_to_initial_request: " 
-            << m_dirIssueToInitialRequestHistogram << endl;
-        out << "miss_latency_dir_initial_forward_request: " 
-            << m_dirInitialRequestToForwardRequestHistogram << endl;
-        out << "miss_latency_dir_forward_to_first_response: " 
-            << m_dirForwardRequestToFirstResponseHistogram << endl;
-        out << "miss_latency_dir_first_response_to_completion: " 
-            << m_dirFirstResponseToCompleteHistogram << endl;
-        out << "imcomplete_dir_Times: " << m_dirIncompleteTimes << endl;
-
-        for (int i = 0; i < m_missMachLatencyHistograms.size(); i++) {
-            for (int j = 0; j < m_missMachLatencyHistograms[i].size(); j++) {
-                if (m_missMachLatencyHistograms[i][j].size() > 0) {
-                    out << "miss_latency_" << RubyRequestType(i) 
-                        << "_" << GenericMachineType(j) << ": "
-                        << m_missMachLatencyHistograms[i][j] << endl;
-                }
-            }
-        }
-
-        out << endl;
-
-        out << "All Non-Zero Cycle SW Prefetch Requests" << endl;
-        out << "------------------------------------" << endl;
-        out << "prefetch_latency: " << m_allSWPrefetchLatencyHistogram << endl;
-        for (int i = 0; i < m_SWPrefetchLatencyHistograms.size(); i++) {
-            if (m_SWPrefetchLatencyHistograms[i].size() > 0) {
-                out << "prefetch_latency_" << RubyRequestType(i) << ": "
-                    << m_SWPrefetchLatencyHistograms[i] << endl;
-            }
-        }
-        for (int i = 0; i < m_SWPrefetchMachLatencyHistograms.size(); i++) {
-            if (m_SWPrefetchMachLatencyHistograms[i].size() > 0) {
-                out << "prefetch_latency_" << GenericMachineType(i) << ": "
-                    << m_SWPrefetchMachLatencyHistograms[i] << endl;
-            }
-        }
-        out << "prefetch_latency_L2Miss:"
-            << m_SWPrefetchL2MissLatencyHistogram << endl;
+        printMissLatencyProfile(out);
 
         if (m_all_sharing_histogram.size() > 0) {
             out << "all_sharing: " << m_all_sharing_histogram << endl;
@@ -469,8 +451,6 @@ Profiler::printStats(ostream& out, bool short_stats)
 
         printRequestProfile(out);
 
-        out << "filter_action: " << m_filter_action_histogram << endl;
-
         if (!m_all_instructions) {
             m_address_profiler_ptr->printStats(out);
         }
@@ -481,82 +461,16 @@ Profiler::printStats(ostream& out, bool short_stats)
 
         out << endl;
         printDelayProfile(out);
-        printResourceUsage(out);
     }
 }
 
-void
-Profiler::printResourceUsage(ostream& out) const
-{
-    out << endl;
-    out << "Resource Usage" << endl;
-    out << "--------------" << endl;
-
-    int64_t pagesize = getpagesize(); // page size in bytes
-    out << "page_size: " << pagesize << endl;
-
-    rusage usage;
-    getrusage (RUSAGE_SELF, &usage);
-
-    out << "user_time: " << usage.ru_utime.tv_sec << endl;
-    out << "system_time: " << usage.ru_stime.tv_sec << endl;
-    out << "page_reclaims: " << usage.ru_minflt << endl;
-    out << "page_faults: " << usage.ru_majflt << endl;
-    out << "swaps: " << usage.ru_nswap << endl;
-    out << "block_inputs: " << usage.ru_inblock << endl;
-    out << "block_outputs: " << usage.ru_oublock << endl;
-}
-
 void
 Profiler::clearStats()
 {
     m_ruby_start = g_system_ptr->curCycle();
     m_real_time_start_time = time(NULL);
 
-    m_cycles_executed_at_start.resize(m_num_of_sequencers);
-    for (int i = 0; i < m_num_of_sequencers; i++) {
-        m_cycles_executed_at_start[i] = g_system_ptr->curCycle();
-    }
-
     m_busyBankCount = 0;
-
-    m_missLatencyHistograms.resize(RubyRequestType_NUM);
-    for (int i = 0; i < m_missLatencyHistograms.size(); i++) {
-        m_missLatencyHistograms[i].clear(200);
-    }
-    m_machLatencyHistograms.resize(GenericMachineType_NUM+1);
-    for (int i = 0; i < m_machLatencyHistograms.size(); i++) {
-        m_machLatencyHistograms[i].clear(200);
-    }
-    m_missMachLatencyHistograms.resize(RubyRequestType_NUM);
-    for (int i = 0; i < m_missLatencyHistograms.size(); i++) {
-        m_missMachLatencyHistograms[i].resize(GenericMachineType_NUM+1);
-        for (int j = 0; j < m_missMachLatencyHistograms[i].size(); j++) {
-            m_missMachLatencyHistograms[i][j].clear(200);
-        }
-    }
-    m_allMissLatencyHistogram.clear(200);
-    m_wCCIssueToInitialRequestHistogram.clear(200);
-    m_wCCInitialRequestToForwardRequestHistogram.clear(200);
-    m_wCCForwardRequestToFirstResponseHistogram.clear(200);
-    m_wCCFirstResponseToCompleteHistogram.clear(200);
-    m_wCCIncompleteTimes = 0;
-    m_dirIssueToInitialRequestHistogram.clear(200);
-    m_dirInitialRequestToForwardRequestHistogram.clear(200);
-    m_dirForwardRequestToFirstResponseHistogram.clear(200);
-    m_dirFirstResponseToCompleteHistogram.clear(200);
-    m_dirIncompleteTimes = 0;
-
-    m_SWPrefetchLatencyHistograms.resize(RubyRequestType_NUM);
-    for (int i = 0; i < m_SWPrefetchLatencyHistograms.size(); i++) {
-        m_SWPrefetchLatencyHistograms[i].clear(200);
-    }
-    m_SWPrefetchMachLatencyHistograms.resize(GenericMachineType_NUM+1);
-    for (int i = 0; i < m_SWPrefetchMachLatencyHistograms.size(); i++) {
-        m_SWPrefetchMachLatencyHistograms[i].clear(200);
-    }
-    m_allSWPrefetchLatencyHistogram.clear(200);
-
     m_read_sharing_histogram.clear();
     m_write_sharing_histogram.clear();
     m_all_sharing_histogram.clear();
@@ -609,126 +523,12 @@ Profiler::profileSharing(const Address& addr, AccessType type,
     }
 }
 
-void
-Profiler::profilePFWait(Cycles waitTime)
-{
-    m_prefetchWaitHistogram.add(waitTime);
-}
-
 void
 Profiler::bankBusy()
 {
     m_busyBankCount++;
 }
 
-// non-zero cycle demand request
-void
-Profiler::missLatency(Cycles cycles,
-                      RubyRequestType type,
-                      const GenericMachineType respondingMach)
-{
-    m_allMissLatencyHistogram.add(cycles);
-    m_missLatencyHistograms[type].add(cycles);
-    m_machLatencyHistograms[respondingMach].add(cycles);
-    m_missMachLatencyHistograms[type][respondingMach].add(cycles);
-}
-
-void
-Profiler::missLatencyWcc(Cycles issuedTime,
-                         Cycles initialRequestTime,
-                         Cycles forwardRequestTime,
-                         Cycles firstResponseTime,
-                         Cycles completionTime)
-{
-    if ((issuedTime <= initialRequestTime) &&
-        (initialRequestTime <= forwardRequestTime) &&
-        (forwardRequestTime <= firstResponseTime) &&
-        (firstResponseTime <= completionTime)) {
-        m_wCCIssueToInitialRequestHistogram.add(initialRequestTime - issuedTime);
-        
-        m_wCCInitialRequestToForwardRequestHistogram.add(forwardRequestTime - 
-                                                         initialRequestTime);
-        
-        m_wCCForwardRequestToFirstResponseHistogram.add(firstResponseTime - 
-                                                        forwardRequestTime);
-        
-        m_wCCFirstResponseToCompleteHistogram.add(completionTime - 
-                                                  firstResponseTime);
-    } else {
-        m_wCCIncompleteTimes++;
-    }
-}
-
-void
-Profiler::missLatencyDir(Cycles issuedTime,
-                         Cycles initialRequestTime,
-                         Cycles forwardRequestTime,
-                         Cycles firstResponseTime,
-                         Cycles completionTime)
-{
-    if ((issuedTime <= initialRequestTime) &&
-        (initialRequestTime <= forwardRequestTime) &&
-        (forwardRequestTime <= firstResponseTime) &&
-        (firstResponseTime <= completionTime)) {
-        m_dirIssueToInitialRequestHistogram.add(initialRequestTime - issuedTime);
-        
-        m_dirInitialRequestToForwardRequestHistogram.add(forwardRequestTime - 
-                                                         initialRequestTime);
-        
-        m_dirForwardRequestToFirstResponseHistogram.add(firstResponseTime - 
-                                                        forwardRequestTime);
-        
-        m_dirFirstResponseToCompleteHistogram.add(completionTime - 
-                                                  firstResponseTime);
-    } else {
-        m_dirIncompleteTimes++;
-    }
-}
-
-// non-zero cycle prefetch request
-void
-Profiler::swPrefetchLatency(Cycles cycles, RubyRequestType type,
-                            const GenericMachineType respondingMach)
-{
-    m_allSWPrefetchLatencyHistogram.add(cycles);
-    m_SWPrefetchLatencyHistograms[type].add(cycles);
-    m_SWPrefetchMachLatencyHistograms[respondingMach].add(cycles);
-
-    if (respondingMach == GenericMachineType_Directory ||
-        respondingMach == GenericMachineType_NUM) {
-        m_SWPrefetchL2MissLatencyHistogram.add(cycles);
-    }
-}
-
-// Helper function
-static double
-process_memory_total()
-{
-    // 4kB page size, 1024*1024 bytes per MB,
-    const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0);
-    ifstream proc_file;
-    proc_file.open("/proc/self/statm");
-    int total_size_in_pages = 0;
-    int res_size_in_pages = 0;
-    proc_file >> total_size_in_pages;
-    proc_file >> res_size_in_pages;
-    return double(total_size_in_pages) * MULTIPLIER; // size in megabytes
-}
-
-static double
-process_memory_resident()
-{
-    // 4kB page size, 1024*1024 bytes per MB,
-    const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0);
-    ifstream proc_file;
-    proc_file.open("/proc/self/statm");
-    int total_size_in_pages = 0;
-    int res_size_in_pages = 0;
-    proc_file >> total_size_in_pages;
-    proc_file >> res_size_in_pages;
-    return double(res_size_in_pages) * MULTIPLIER; // size in megabytes
-}
-
 void
 Profiler::rubyWatch(int id)
 {