2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 This file has been modified by Kevin Moore and Dan Nussbaum of the
31 Scalable Systems Research Group at Sun Microsystems Laboratories
32 (http://research.sun.com/scalable/) to support the Adaptive
33 Transactional Memory Test Platform (ATMTP).
35 Please send email to atmtp-interest@sun.com with feedback, questions, or
36 to request future announcements about ATMTP.
38 ----------------------------------------------------------------------
40 File modification date: 2008-02-23
42 ----------------------------------------------------------------------
45 // Allows use of times() library call, which determines virtual runtime
#include <sys/resource.h>
#include <sys/times.h>
#include <sys/types.h>
#include <unistd.h>

#include <fstream>
54 #include "base/stl_helpers.hh"
55 #include "base/str.hh"
56 #include "mem/protocol/MachineType.hh"
57 #include "mem/protocol/RubyRequest.hh"
58 #include "mem/ruby/network/Network.hh"
59 #include "mem/ruby/profiler/AddressProfiler.hh"
60 #include "mem/ruby/profiler/Profiler.hh"
61 #include "mem/ruby/system/Sequencer.hh"
62 #include "mem/ruby/system/System.hh"
65 using m5::stl_helpers::operator<<;
67 static double process_memory_total();
68 static double process_memory_resident();
70 Profiler::Profiler(const Params
*p
)
73 m_inst_profiler_ptr
= NULL
;
74 m_address_profiler_ptr
= NULL
;
75 m_real_time_start_time
= time(NULL
); // Not reset in clearStats()
77 m_hot_lines
= p
->hot_lines
;
78 m_all_instructions
= p
->all_instructions
;
80 m_num_of_sequencers
= p
->num_of_sequencers
;
83 m_all_instructions
= false;
85 m_address_profiler_ptr
= new AddressProfiler(m_num_of_sequencers
);
86 m_address_profiler_ptr
->setHotLines(m_hot_lines
);
87 m_address_profiler_ptr
->setAllInstructions(m_all_instructions
);
89 if (m_all_instructions
) {
90 m_inst_profiler_ptr
= new AddressProfiler(m_num_of_sequencers
);
91 m_inst_profiler_ptr
->setHotLines(m_hot_lines
);
92 m_inst_profiler_ptr
->setAllInstructions(m_all_instructions
);
95 p
->ruby_system
->registerProfiler(this);
103 Profiler::print(ostream
& out
) const
109 Profiler::printRequestProfile(ostream
&out
) const
111 out
<< "Request vs. RubySystem State Profile" << endl
;
112 out
<< "--------------------------------" << endl
;
115 map
<string
, uint64_t> m_requestProfileMap
;
116 uint64_t m_requests
= 0;
118 for (uint32_t i
= 0; i
< MachineType_NUM
; i
++) {
119 for (map
<uint32_t, AbstractController
*>::iterator it
=
120 g_abs_controls
[i
].begin();
121 it
!= g_abs_controls
[i
].end(); ++it
) {
123 AbstractController
*ctr
= (*it
).second
;
124 map
<string
, uint64_t> mp
= ctr
->getRequestProfileMap();
126 for (map
<string
, uint64_t>::iterator jt
= mp
.begin();
127 jt
!= mp
.end(); ++jt
) {
129 map
<string
, uint64_t>::iterator kt
=
130 m_requestProfileMap
.find((*jt
).first
);
131 if (kt
!= m_requestProfileMap
.end()) {
132 (*kt
).second
+= (*jt
).second
;
134 m_requestProfileMap
[(*jt
).first
] = (*jt
).second
;
138 m_requests
+= ctr
->getRequestCount();
142 map
<string
, uint64_t>::const_iterator i
= m_requestProfileMap
.begin();
143 map
<string
, uint64_t>::const_iterator end
= m_requestProfileMap
.end();
144 for (; i
!= end
; ++i
) {
145 const string
&key
= i
->first
;
146 uint64_t count
= i
->second
;
148 double percent
= (100.0 * double(count
)) / double(m_requests
);
149 vector
<string
> items
;
150 tokenize(items
, key
, ':');
151 vector
<string
>::iterator j
= items
.begin();
152 vector
<string
>::iterator end
= items
.end();
153 for (; j
!= end
; ++i
)
154 out
<< setw(10) << *j
;
155 out
<< setw(11) << count
;
156 out
<< setw(14) << percent
<< endl
;
162 Profiler::printDelayProfile(ostream
&out
) const
164 out
<< "Message Delayed Cycles" << endl
;
165 out
<< "----------------------" << endl
;
167 uint32_t numVNets
= Network::getNumberOfVirtualNetworks();
168 Histogram delayHistogram
;
169 std::vector
<Histogram
> delayVCHistogram(numVNets
);
171 for (uint32_t i
= 0; i
< MachineType_NUM
; i
++) {
172 for (map
<uint32_t, AbstractController
*>::iterator it
=
173 g_abs_controls
[i
].begin();
174 it
!= g_abs_controls
[i
].end(); ++it
) {
176 AbstractController
*ctr
= (*it
).second
;
177 delayHistogram
.add(ctr
->getDelayHist());
179 for (uint32_t i
= 0; i
< numVNets
; i
++) {
180 delayVCHistogram
[i
].add(ctr
->getDelayVCHist(i
));
185 out
<< "Total_delay_cycles: " << delayHistogram
<< endl
;
187 for (int i
= 0; i
< numVNets
; i
++) {
188 out
<< " virtual_network_" << i
<< "_delay_cycles: "
189 << delayVCHistogram
[i
] << endl
;
194 Profiler::printOutstandingReqProfile(ostream
&out
) const
196 Histogram sequencerRequests
;
198 for (uint32_t i
= 0; i
< MachineType_NUM
; i
++) {
199 for (map
<uint32_t, AbstractController
*>::iterator it
=
200 g_abs_controls
[i
].begin();
201 it
!= g_abs_controls
[i
].end(); ++it
) {
203 AbstractController
*ctr
= (*it
).second
;
204 Sequencer
*seq
= ctr
->getSequencer();
206 sequencerRequests
.add(seq
->getOutstandReqHist());
211 out
<< "sequencer_requests_outstanding: "
212 << sequencerRequests
<< endl
;
// Emit the full human-readable statistics report: elapsed times, memory
// footprint, per-controller busy counts, latency histograms, sharing
// profile, request profile, delay profile, and resource usage.
// NOTE(review): several original lines (opening/closing braces, the
// "short_stats" guards, and the declarations of 'vtime' and 'machID')
// are not visible in this chunk; code is reproduced as-is.
Profiler::printStats(ostream& out, bool short_stats)
    out << "Profiler Stats" << endl;
    out << "--------------" << endl;

    // Wall-clock time since the profiler was constructed.
    time_t real_time_current = time(NULL);
    double seconds = difftime(real_time_current, m_real_time_start_time);
    double minutes = seconds / 60.0;
    double hours = minutes / 60.0;
    double days = hours / 24.0;
    Cycles ruby_cycles = g_system_ptr->curCycle()-m_ruby_start;

    out << "Elapsed_time_in_seconds: " << seconds << endl;
    out << "Elapsed_time_in_minutes: " << minutes << endl;
    out << "Elapsed_time_in_hours: " << hours << endl;
    out << "Elapsed_time_in_days: " << days << endl;

    // print the virtual runtimes as well
    // NOTE(review): 'vtime' (presumably a struct tms filled by times())
    // is declared on a line not visible here; the /100.0 looks like a
    // clock-ticks-to-seconds conversion — confirm against
    // sysconf(_SC_CLK_TCK), which is not guaranteed to be 100.
    seconds = (vtime.tms_utime + vtime.tms_stime) / 100.0;
    minutes = seconds / 60.0;
    hours = minutes / 60.0;
    // NOTE(review): 'days' is not recomputed for virtual time in the
    // visible lines, so Virtual_time_in_days repeats the wall-clock value.
    out << "Virtual_time_in_seconds: " << seconds << endl;
    out << "Virtual_time_in_minutes: " << minutes << endl;
    out << "Virtual_time_in_hours: " << hours << endl;
    out << "Virtual_time_in_days: " << days << endl;

    out << "Ruby_current_time: " << g_system_ptr->curCycle() << endl;
    out << "Ruby_start_time: " << m_ruby_start << endl;
    out << "Ruby_cycles: " << ruby_cycles << endl;

    // Process memory footprint in megabytes (see process_memory_*()).
    out << "mbytes_resident: " << process_memory_resident() << endl;
    out << "mbytes_total: " << process_memory_total() << endl;
    if (process_memory_total() > 0) {
        out << "resident_ratio: "
            << process_memory_resident()/process_memory_total() << endl;

    // Per-sequencer cycle counts since the last clearStats().
    vector<int64_t> perProcCycleCount(m_num_of_sequencers);
    for (int i = 0; i < m_num_of_sequencers; i++) {
        perProcCycleCount[i] =
            g_system_ptr->curCycle() - m_cycles_executed_at_start[i] + 1;
        // The +1 allows us to avoid division by zero
    out << "ruby_cycles_executed: " << perProcCycleCount << endl;

    // Fully-busy cycle count of each controller, 8 per output row.
    out << "Busy Controller Counts:" << endl;
    for (uint32_t i = 0; i < MachineType_NUM; i++) {
        uint32_t size = MachineType_base_count((MachineType)i);
        for (uint32_t j = 0; j < size; j++) {
            // NOTE(review): 'machID' (a MachineID) is declared on a line
            // not visible here; its node-number field is presumably set
            // from j on an elided line.
            machID.type = (MachineType)i;
            AbstractController *ctr =
                (*(g_abs_controls[i].find(j))).second;
            out << machID << ":" << ctr->getFullyBusyCycles() << " ";
            if ((j + 1) % 8 == 0) {
    out << "Busy Bank Count:" << m_busyBankCount << endl;

    printOutstandingReqProfile(out);

    // Demand-miss latency histograms: overall, per request type, and per
    // responding machine type.
    out << "All Non-Zero Cycle Demand Cache Accesses" << endl;
    out << "----------------------------------------" << endl;
    out << "miss_latency: " << m_allMissLatencyHistogram << endl;
    for (int i = 0; i < m_missLatencyHistograms.size(); i++) {
        if (m_missLatencyHistograms[i].size() > 0) {
            out << "miss_latency_" << RubyRequestType(i) << ": "
                << m_missLatencyHistograms[i] << endl;
    for (int i = 0; i < m_machLatencyHistograms.size(); i++) {
        if (m_machLatencyHistograms[i].size() > 0) {
            out << "miss_latency_" << GenericMachineType(i) << ": "
                << m_machLatencyHistograms[i] << endl;

    // Per-stage miss-latency breakdown when the L2/wCC supplied the data.
    // (The "imcomplete" typo is preserved: it is part of the emitted
    // stats key that downstream scripts may grep for.)
    out << "miss_latency_wCC_issue_to_initial_request: "
        << m_wCCIssueToInitialRequestHistogram << endl;
    out << "miss_latency_wCC_initial_forward_request: "
        << m_wCCInitialRequestToForwardRequestHistogram << endl;
    out << "miss_latency_wCC_forward_to_first_response: "
        << m_wCCForwardRequestToFirstResponseHistogram << endl;
    out << "miss_latency_wCC_first_response_to_completion: "
        << m_wCCFirstResponseToCompleteHistogram << endl;
    out << "imcomplete_wCC_Times: " << m_wCCIncompleteTimes << endl;
    // Per-stage breakdown when the directory supplied the data.
    out << "miss_latency_dir_issue_to_initial_request: "
        << m_dirIssueToInitialRequestHistogram << endl;
    out << "miss_latency_dir_initial_forward_request: "
        << m_dirInitialRequestToForwardRequestHistogram << endl;
    out << "miss_latency_dir_forward_to_first_response: "
        << m_dirForwardRequestToFirstResponseHistogram << endl;
    out << "miss_latency_dir_first_response_to_completion: "
        << m_dirFirstResponseToCompleteHistogram << endl;
    out << "imcomplete_dir_Times: " << m_dirIncompleteTimes << endl;

    // (request type x responding machine) cross-product histograms.
    for (int i = 0; i < m_missMachLatencyHistograms.size(); i++) {
        for (int j = 0; j < m_missMachLatencyHistograms[i].size(); j++) {
            if (m_missMachLatencyHistograms[i][j].size() > 0) {
                out << "miss_latency_" << RubyRequestType(i)
                    << "_" << GenericMachineType(j) << ": "
                    << m_missMachLatencyHistograms[i][j] << endl;

    // Software-prefetch latency histograms.
    out << "All Non-Zero Cycle SW Prefetch Requests" << endl;
    out << "------------------------------------" << endl;
    out << "prefetch_latency: " << m_allSWPrefetchLatencyHistogram << endl;
    for (int i = 0; i < m_SWPrefetchLatencyHistograms.size(); i++) {
        if (m_SWPrefetchLatencyHistograms[i].size() > 0) {
            out << "prefetch_latency_" << RubyRequestType(i) << ": "
                << m_SWPrefetchLatencyHistograms[i] << endl;
    for (int i = 0; i < m_SWPrefetchMachLatencyHistograms.size(); i++) {
        if (m_SWPrefetchMachLatencyHistograms[i].size() > 0) {
            out << "prefetch_latency_" << GenericMachineType(i) << ": "
                << m_SWPrefetchMachLatencyHistograms[i] << endl;
    out << "prefetch_latency_L2Miss:"
        << m_SWPrefetchL2MissLatencyHistogram << endl;

    // Sharing behavior: how many other nodes each request had to contact
    // (populated by profileSharing()).
    if (m_all_sharing_histogram.size() > 0) {
        out << "all_sharing: " << m_all_sharing_histogram << endl;
        out << "read_sharing: " << m_read_sharing_histogram << endl;
        out << "write_sharing: " << m_write_sharing_histogram << endl;

        out << "all_sharing_percent: ";
        m_all_sharing_histogram.printPercent(out);

        out << "read_sharing_percent: ";
        m_read_sharing_histogram.printPercent(out);

        out << "write_sharing_percent: ";
        m_write_sharing_histogram.printPercent(out);

        int64 total_miss = m_cache_to_cache + m_memory_to_cache;
        out << "all_misses: " << total_miss << endl;
        out << "cache_to_cache_misses: " << m_cache_to_cache << endl;
        out << "memory_to_cache_misses: " << m_memory_to_cache << endl;
        // NOTE(review): the endl terminators of the next two statements
        // are on lines not visible in this chunk.
        out << "cache_to_cache_percent: "
            << 100.0 * (double(m_cache_to_cache) / double(total_miss))
        out << "memory_to_cache_percent: "
            << 100.0 * (double(m_memory_to_cache) / double(total_miss))

    printRequestProfile(out);

    out << "filter_action: " << m_filter_action_histogram << endl;

    // Data-address profile by default; the instruction profile instead
    // when configured to profile all instructions.
    if (!m_all_instructions) {
        m_address_profiler_ptr->printStats(out);
    if (m_all_instructions) {
        m_inst_profiler_ptr->printStats(out);

    printDelayProfile(out);
    printResourceUsage(out);
// Dump host-process resource usage: page size plus the getrusage()
// counters for this process.
// NOTE(review): the declaration of 'usage' (a struct rusage) is on a
// line not visible in this chunk.
Profiler::printResourceUsage(ostream& out) const
    out << "Resource Usage" << endl;
    out << "--------------" << endl;

    int64_t pagesize = getpagesize(); // page size in bytes
    out << "page_size: " << pagesize << endl;

    getrusage (RUSAGE_SELF, &usage);

    // CPU times are reported in whole seconds (tv_usec is ignored).
    out << "user_time: " << usage.ru_utime.tv_sec << endl;
    out << "system_time: " << usage.ru_stime.tv_sec << endl;
    // Soft page faults (no I/O) vs. hard page faults (required I/O).
    out << "page_reclaims: " << usage.ru_minflt << endl;
    out << "page_faults: " << usage.ru_majflt << endl;
    out << "swaps: " << usage.ru_nswap << endl;
    out << "block_inputs: " << usage.ru_inblock << endl;
    out << "block_outputs: " << usage.ru_oublock << endl;
// Reset every accumulated statistic and restart the measurement window.
// NOTE(review): braces and a few lines are not visible in this chunk;
// code is reproduced as-is.
Profiler::clearStats()
    m_ruby_start = g_system_ptr->curCycle();
    // NOTE(review): the constructor comments that m_real_time_start_time
    // is "Not reset in clearStats()", yet it is reset right here — one of
    // the two is stale; confirm intended semantics.
    m_real_time_start_time = time(NULL);

    // Snapshot the current cycle for each sequencer so printStats() can
    // report per-sequencer cycles executed since this reset.
    m_cycles_executed_at_start.resize(m_num_of_sequencers);
    for (int i = 0; i < m_num_of_sequencers; i++) {
        m_cycles_executed_at_start[i] = g_system_ptr->curCycle();

    // Latency histograms; clear(200) presumably resets to 200 buckets.
    m_missLatencyHistograms.resize(RubyRequestType_NUM);
    for (int i = 0; i < m_missLatencyHistograms.size(); i++) {
        m_missLatencyHistograms[i].clear(200);
    m_machLatencyHistograms.resize(GenericMachineType_NUM+1);
    for (int i = 0; i < m_machLatencyHistograms.size(); i++) {
        m_machLatencyHistograms[i].clear(200);
    m_missMachLatencyHistograms.resize(RubyRequestType_NUM);
    // NOTE(review): the loop below walks m_missMachLatencyHistograms but
    // bounds on m_missLatencyHistograms.size(); harmless only because
    // both are resized to RubyRequestType_NUM — worth tightening.
    for (int i = 0; i < m_missLatencyHistograms.size(); i++) {
        m_missMachLatencyHistograms[i].resize(GenericMachineType_NUM+1);
        for (int j = 0; j < m_missMachLatencyHistograms[i].size(); j++) {
            m_missMachLatencyHistograms[i][j].clear(200);
    m_allMissLatencyHistogram.clear(200);
    m_wCCIssueToInitialRequestHistogram.clear(200);
    m_wCCInitialRequestToForwardRequestHistogram.clear(200);
    m_wCCForwardRequestToFirstResponseHistogram.clear(200);
    m_wCCFirstResponseToCompleteHistogram.clear(200);
    m_wCCIncompleteTimes = 0;
    m_dirIssueToInitialRequestHistogram.clear(200);
    m_dirInitialRequestToForwardRequestHistogram.clear(200);
    m_dirForwardRequestToFirstResponseHistogram.clear(200);
    m_dirFirstResponseToCompleteHistogram.clear(200);
    m_dirIncompleteTimes = 0;

    // Software-prefetch latency histograms.
    m_SWPrefetchLatencyHistograms.resize(RubyRequestType_NUM);
    for (int i = 0; i < m_SWPrefetchLatencyHistograms.size(); i++) {
        m_SWPrefetchLatencyHistograms[i].clear(200);
    m_SWPrefetchMachLatencyHistograms.resize(GenericMachineType_NUM+1);
    for (int i = 0; i < m_SWPrefetchMachLatencyHistograms.size(); i++) {
        m_SWPrefetchMachLatencyHistograms[i].clear(200);
    m_allSWPrefetchLatencyHistogram.clear(200);

    // Sharing statistics (fed by profileSharing()).
    m_read_sharing_histogram.clear();
    m_write_sharing_histogram.clear();
    m_all_sharing_histogram.clear();
    m_cache_to_cache = 0;
    m_memory_to_cache = 0;

    // update the start time
    // NOTE(review): duplicates the assignment at the top of the function.
    m_ruby_start = g_system_ptr->curCycle();
505 Profiler::addAddressTraceSample(const RubyRequest
& msg
, NodeID id
)
507 if (msg
.getType() != RubyRequestType_IFETCH
) {
508 // Note: The following line should be commented out if you
509 // want to use the special profiling that is part of the GS320
512 // NOTE: Unless PROFILE_HOT_LINES is enabled, nothing will be
513 // profiled by the AddressProfiler
514 m_address_profiler_ptr
->
515 addTraceSample(msg
.getLineAddress(), msg
.getProgramCounter(),
516 msg
.getType(), msg
.getAccessMode(), id
, false);
// Record, for one request to 'addr', how many other nodes had to be
// contacted: the owner plus, on a write, every sharer; the requestor
// itself is excluded.
// NOTE(review): the final parameter list entry (the 'owner' Set) and the
// tail of the function after the number_contacted == 0 test are on lines
// not visible in this chunk.
Profiler::profileSharing(const Address& addr, AccessType type,
                         NodeID requestor, const Set& sharers,
    // Start from the owner; a write must also invalidate all sharers.
    Set set_contacted(owner);
    if (type == AccessType_Write) {
        set_contacted.addSet(sharers);
    set_contacted.remove(requestor);
    int number_contacted = set_contacted.count();

    // NOTE(review): the read-histogram add sits in an else branch whose
    // "} else {" line is not visible here.
    if (type == AccessType_Write) {
        m_write_sharing_histogram.add(number_contacted);
        m_read_sharing_histogram.add(number_contacted);
    m_all_sharing_histogram.add(number_contacted);

    // Zero contacts presumably means the data came from memory rather
    // than another cache; the counter increments are not visible here.
    if (number_contacted == 0) {
547 Profiler::profilePFWait(Cycles waitTime
)
549 m_prefetchWaitHistogram
.add(waitTime
);
558 // non-zero cycle demand request
560 Profiler::missLatency(Cycles cycles
,
561 RubyRequestType type
,
562 const GenericMachineType respondingMach
)
564 m_allMissLatencyHistogram
.add(cycles
);
565 m_missLatencyHistograms
[type
].add(cycles
);
566 m_machLatencyHistograms
[respondingMach
].add(cycles
);
567 m_missMachLatencyHistograms
[type
][respondingMach
].add(cycles
);
571 Profiler::missLatencyWcc(Cycles issuedTime
,
572 Cycles initialRequestTime
,
573 Cycles forwardRequestTime
,
574 Cycles firstResponseTime
,
575 Cycles completionTime
)
577 if ((issuedTime
<= initialRequestTime
) &&
578 (initialRequestTime
<= forwardRequestTime
) &&
579 (forwardRequestTime
<= firstResponseTime
) &&
580 (firstResponseTime
<= completionTime
)) {
581 m_wCCIssueToInitialRequestHistogram
.add(initialRequestTime
- issuedTime
);
583 m_wCCInitialRequestToForwardRequestHistogram
.add(forwardRequestTime
-
586 m_wCCForwardRequestToFirstResponseHistogram
.add(firstResponseTime
-
589 m_wCCFirstResponseToCompleteHistogram
.add(completionTime
-
592 m_wCCIncompleteTimes
++;
597 Profiler::missLatencyDir(Cycles issuedTime
,
598 Cycles initialRequestTime
,
599 Cycles forwardRequestTime
,
600 Cycles firstResponseTime
,
601 Cycles completionTime
)
603 if ((issuedTime
<= initialRequestTime
) &&
604 (initialRequestTime
<= forwardRequestTime
) &&
605 (forwardRequestTime
<= firstResponseTime
) &&
606 (firstResponseTime
<= completionTime
)) {
607 m_dirIssueToInitialRequestHistogram
.add(initialRequestTime
- issuedTime
);
609 m_dirInitialRequestToForwardRequestHistogram
.add(forwardRequestTime
-
612 m_dirForwardRequestToFirstResponseHistogram
.add(firstResponseTime
-
615 m_dirFirstResponseToCompleteHistogram
.add(completionTime
-
618 m_dirIncompleteTimes
++;
622 // non-zero cycle prefetch request
624 Profiler::swPrefetchLatency(Cycles cycles
, RubyRequestType type
,
625 const GenericMachineType respondingMach
)
627 m_allSWPrefetchLatencyHistogram
.add(cycles
);
628 m_SWPrefetchLatencyHistograms
[type
].add(cycles
);
629 m_SWPrefetchMachLatencyHistograms
[respondingMach
].add(cycles
);
631 if (respondingMach
== GenericMachineType_Directory
||
632 respondingMach
== GenericMachineType_NUM
) {
633 m_SWPrefetchL2MissLatencyHistogram
.add(cycles
);
// Return this process's total virtual-memory size in megabytes, read
// from /proc/self/statm (first field, in pages). Returns 0.0 when the
// file cannot be read.
static double
process_memory_total()
{
    // BUG FIX: the original hard-coded a 4kB page, which is wrong on
    // hosts with e.g. 64kB pages; use the real page size, matching
    // printResourceUsage()'s use of getpagesize().
    const double MULTIPLIER = double(getpagesize()) / (1024.0 * 1024.0);
    std::ifstream proc_file;
    proc_file.open("/proc/self/statm");
    int total_size_in_pages = 0;
    int res_size_in_pages = 0;
    proc_file >> total_size_in_pages;
    proc_file >> res_size_in_pages;
    return double(total_size_in_pages) * MULTIPLIER; // size in megabytes
}
// Return this process's resident-set size in megabytes, read from
// /proc/self/statm (second field, in pages). Returns 0.0 when the file
// cannot be read.
static double
process_memory_resident()
{
    // BUG FIX: the original hard-coded a 4kB page, which is wrong on
    // hosts with e.g. 64kB pages; use the real page size, matching
    // printResourceUsage()'s use of getpagesize().
    const double MULTIPLIER = double(getpagesize()) / (1024.0 * 1024.0);
    std::ifstream proc_file;
    proc_file.open("/proc/self/statm");
    int total_size_in_pages = 0;
    int res_size_in_pages = 0;
    proc_file >> total_size_in_pages;
    proc_file >> res_size_in_pages;
    return double(res_size_in_pages) * MULTIPLIER; // size in megabytes
}
// Register an address in the watch set so later accesses to it can be
// flagged (see watchAddress()); logs the registration via DPRINTFN.
// NOTE(review): the declaration of 'tr' and the trailing DPRINTFN
// argument(s) are on lines not visible in this chunk.
Profiler::rubyWatch(int id)
    Address watch_address = Address(tr);

    DPRINTFN("%7s %3s RUBY WATCH %d\n", g_system_ptr->curCycle(), id,

    // don't care about success or failure
    m_watch_address_set.insert(watch_address);
680 Profiler::watchAddress(Address addr
)
682 return m_watch_address_set
.count(addr
) > 0;
// Factory hook invoked by the generated parameter struct to construct
// the Profiler SimObject.
RubyProfilerParams::create()
    return new Profiler(this);