2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 This file has been modified by Kevin Moore and Dan Nussbaum of the
31 Scalable Systems Research Group at Sun Microsystems Laboratories
32 (http://research.sun.com/scalable/) to support the Adaptive
33 Transactional Memory Test Platform (ATMTP).
35 Please send email to atmtp-interest@sun.com with feedback, questions, or
36 to request future announcements about ATMTP.
38 ----------------------------------------------------------------------
40 File modification date: 2008-02-23
42 ----------------------------------------------------------------------
48 * Description: See Profiler.hh
54 // Allows use of times() library call, which determines virtual runtime
55 #include <sys/resource.h>
56 #include <sys/times.h>
58 #include "mem/ruby/profiler/Profiler.hh"
59 #include "mem/ruby/profiler/AddressProfiler.hh"
60 #include "mem/ruby/system/System.hh"
61 #include "mem/ruby/network/Network.hh"
62 #include "mem/gems_common/PrioHeap.hh"
63 #include "mem/protocol/CacheMsg.hh"
64 #include "mem/protocol/Protocol.hh"
65 #include "mem/gems_common/util.hh"
66 #include "mem/gems_common/Map.hh"
67 #include "mem/ruby/common/Debug.hh"
68 #include "mem/protocol/MachineType.hh"
70 #include "mem/ruby/system/System.hh"
// Stream used for the protocol trace / debug output.  Defined in the
// Ruby debug module (NOTE(review): definition not visible in this chunk
// -- confirm it lives in mem/ruby/common/Debug.cc).
extern std::ostream * debug_cout_ptr;

// File-local helpers (defined at the bottom of this file) that parse
// /proc/self/statm and report process memory usage in megabytes.
static double process_memory_total();
static double process_memory_resident();
77 Profiler::Profiler(const Params
*p
)
80 m_requestProfileMap_ptr
= new Map
<string
, int>;
82 m_inst_profiler_ptr
= NULL
;
83 m_address_profiler_ptr
= NULL
;
85 m_real_time_start_time
= time(NULL
); // Not reset in clearStats()
86 m_stats_period
= 1000000; // Default
87 m_periodic_output_file_ptr
= &cerr
;
89 m_hot_lines
= p
->hot_lines
;
90 m_all_instructions
= p
->all_instructions
;
92 m_num_of_sequencers
= p
->num_of_sequencers
;
95 m_all_instructions
= false;
97 m_address_profiler_ptr
= new AddressProfiler(m_num_of_sequencers
);
98 m_address_profiler_ptr
-> setHotLines(m_hot_lines
);
99 m_address_profiler_ptr
-> setAllInstructions(m_all_instructions
);
101 if (m_all_instructions
) {
102 m_inst_profiler_ptr
= new AddressProfiler(m_num_of_sequencers
);
103 m_inst_profiler_ptr
-> setHotLines(m_hot_lines
);
104 m_inst_profiler_ptr
-> setAllInstructions(m_all_instructions
);
108 Profiler::~Profiler()
110 if (m_periodic_output_file_ptr
!= &cerr
) {
111 delete m_periodic_output_file_ptr
;
114 delete m_requestProfileMap_ptr
;
117 void Profiler::wakeup()
119 // FIXME - avoid the repeated code
121 Vector
<integer_t
> perProcCycleCount
;
122 perProcCycleCount
.setSize(m_num_of_sequencers
);
124 for(int i
=0; i
< m_num_of_sequencers
; i
++) {
125 perProcCycleCount
[i
] = g_system_ptr
->getCycleCount(i
) - m_cycles_executed_at_start
[i
] + 1;
126 // The +1 allows us to avoid division by zero
129 (*m_periodic_output_file_ptr
) << "ruby_cycles: "
130 << g_eventQueue_ptr
->getTime()-m_ruby_start
133 (*m_periodic_output_file_ptr
) << "mbytes_resident: "
134 << process_memory_resident()
137 (*m_periodic_output_file_ptr
) << "mbytes_total: "
138 << process_memory_total()
141 if (process_memory_total() > 0) {
142 (*m_periodic_output_file_ptr
) << "resident_ratio: "
143 << process_memory_resident()/process_memory_total()
147 (*m_periodic_output_file_ptr
) << "miss_latency: "
148 << m_allMissLatencyHistogram
151 *m_periodic_output_file_ptr
<< endl
;
153 if (m_all_instructions
) {
154 m_inst_profiler_ptr
->printStats(*m_periodic_output_file_ptr
);
157 //g_system_ptr->getNetwork()->printStats(*m_periodic_output_file_ptr);
158 g_eventQueue_ptr
->scheduleEvent(this, m_stats_period
);
161 void Profiler::setPeriodicStatsFile(const string
& filename
)
163 cout
<< "Recording periodic statistics to file '" << filename
<< "' every "
164 << m_stats_period
<< " Ruby cycles" << endl
;
166 if (m_periodic_output_file_ptr
!= &cerr
) {
167 delete m_periodic_output_file_ptr
;
170 m_periodic_output_file_ptr
= new ofstream(filename
.c_str());
171 g_eventQueue_ptr
->scheduleEvent(this, 1);
174 void Profiler::setPeriodicStatsInterval(integer_t period
)
176 cout
<< "Recording periodic statistics every " << m_stats_period
177 << " Ruby cycles" << endl
;
179 m_stats_period
= period
;
180 g_eventQueue_ptr
->scheduleEvent(this, 1);
183 void Profiler::printConfig(ostream
& out
) const
186 out
<< "Profiler Configuration" << endl
;
187 out
<< "----------------------" << endl
;
188 out
<< "periodic_stats_period: " << m_stats_period
<< endl
;
191 void Profiler::print(ostream
& out
) const
196 void Profiler::printStats(ostream
& out
, bool short_stats
)
202 out
<< "Profiler Stats" << endl
;
203 out
<< "--------------" << endl
;
205 time_t real_time_current
= time(NULL
);
206 double seconds
= difftime(real_time_current
, m_real_time_start_time
);
207 double minutes
= seconds
/60.0;
208 double hours
= minutes
/60.0;
209 double days
= hours
/24.0;
210 Time ruby_cycles
= g_eventQueue_ptr
->getTime()-m_ruby_start
;
213 out
<< "Elapsed_time_in_seconds: " << seconds
<< endl
;
214 out
<< "Elapsed_time_in_minutes: " << minutes
<< endl
;
215 out
<< "Elapsed_time_in_hours: " << hours
<< endl
;
216 out
<< "Elapsed_time_in_days: " << days
<< endl
;
220 // print the virtual runtimes as well
223 seconds
= (vtime
.tms_utime
+ vtime
.tms_stime
) / 100.0;
224 minutes
= seconds
/ 60.0;
225 hours
= minutes
/ 60.0;
227 out
<< "Virtual_time_in_seconds: " << seconds
<< endl
;
228 out
<< "Virtual_time_in_minutes: " << minutes
<< endl
;
229 out
<< "Virtual_time_in_hours: " << hours
<< endl
;
230 out
<< "Virtual_time_in_days: " << days
<< endl
;
233 out
<< "Ruby_current_time: " << g_eventQueue_ptr
->getTime() << endl
;
234 out
<< "Ruby_start_time: " << m_ruby_start
<< endl
;
235 out
<< "Ruby_cycles: " << ruby_cycles
<< endl
;
239 out
<< "mbytes_resident: " << process_memory_resident() << endl
;
240 out
<< "mbytes_total: " << process_memory_total() << endl
;
241 if (process_memory_total() > 0) {
242 out
<< "resident_ratio: "
243 << process_memory_resident()/process_memory_total() << endl
;
249 Vector
<integer_t
> perProcCycleCount
;
250 perProcCycleCount
.setSize(m_num_of_sequencers
);
252 for(int i
=0; i
< m_num_of_sequencers
; i
++) {
253 perProcCycleCount
[i
] = g_system_ptr
->getCycleCount(i
) - m_cycles_executed_at_start
[i
] + 1;
254 // The +1 allows us to avoid division by zero
257 out
<< "ruby_cycles_executed: " << perProcCycleCount
<< endl
;
262 out
<< "Busy Controller Counts:" << endl
;
263 for(int i
=0; i
< MachineType_NUM
; i
++) {
264 for(int j
=0; j
< MachineType_base_count((MachineType
)i
); j
++) {
266 machID
.type
= (MachineType
)i
;
268 out
<< machID
<< ":" << m_busyControllerCount
[i
][j
] << " ";
277 out
<< "Busy Bank Count:" << m_busyBankCount
<< endl
;
280 out
<< "sequencer_requests_outstanding: " << m_sequencer_requests
<< endl
;
285 out
<< "All Non-Zero Cycle Demand Cache Accesses" << endl
;
286 out
<< "----------------------------------------" << endl
;
287 out
<< "miss_latency: " << m_allMissLatencyHistogram
<< endl
;
288 for(int i
=0; i
<m_missLatencyHistograms
.size(); i
++) {
289 if (m_missLatencyHistograms
[i
].size() > 0) {
290 out
<< "miss_latency_" << RubyRequestType(i
) << ": " << m_missLatencyHistograms
[i
] << endl
;
293 for(int i
=0; i
<m_machLatencyHistograms
.size(); i
++) {
294 if (m_machLatencyHistograms
[i
].size() > 0) {
295 out
<< "miss_latency_" << GenericMachineType(i
) << ": " << m_machLatencyHistograms
[i
] << endl
;
301 out
<< "All Non-Zero Cycle SW Prefetch Requests" << endl
;
302 out
<< "------------------------------------" << endl
;
303 out
<< "prefetch_latency: " << m_allSWPrefetchLatencyHistogram
<< endl
;
304 for(int i
=0; i
<m_SWPrefetchLatencyHistograms
.size(); i
++) {
305 if (m_SWPrefetchLatencyHistograms
[i
].size() > 0) {
306 out
<< "prefetch_latency_" << CacheRequestType(i
) << ": " << m_SWPrefetchLatencyHistograms
[i
] << endl
;
309 for(int i
=0; i
<m_SWPrefetchMachLatencyHistograms
.size(); i
++) {
310 if (m_SWPrefetchMachLatencyHistograms
[i
].size() > 0) {
311 out
<< "prefetch_latency_" << GenericMachineType(i
) << ": " << m_SWPrefetchMachLatencyHistograms
[i
] << endl
;
314 out
<< "prefetch_latency_L2Miss:" << m_SWPrefetchL2MissLatencyHistogram
<< endl
;
316 if (m_all_sharing_histogram
.size() > 0) {
317 out
<< "all_sharing: " << m_all_sharing_histogram
<< endl
;
318 out
<< "read_sharing: " << m_read_sharing_histogram
<< endl
;
319 out
<< "write_sharing: " << m_write_sharing_histogram
<< endl
;
321 out
<< "all_sharing_percent: "; m_all_sharing_histogram
.printPercent(out
); out
<< endl
;
322 out
<< "read_sharing_percent: "; m_read_sharing_histogram
.printPercent(out
); out
<< endl
;
323 out
<< "write_sharing_percent: "; m_write_sharing_histogram
.printPercent(out
); out
<< endl
;
325 int64 total_miss
= m_cache_to_cache
+ m_memory_to_cache
;
326 out
<< "all_misses: " << total_miss
<< endl
;
327 out
<< "cache_to_cache_misses: " << m_cache_to_cache
<< endl
;
328 out
<< "memory_to_cache_misses: " << m_memory_to_cache
<< endl
;
329 out
<< "cache_to_cache_percent: " << 100.0 * (double(m_cache_to_cache
) / double(total_miss
)) << endl
;
330 out
<< "memory_to_cache_percent: " << 100.0 * (double(m_memory_to_cache
) / double(total_miss
)) << endl
;
334 if (m_outstanding_requests
.size() > 0) {
335 out
<< "outstanding_requests: "; m_outstanding_requests
.printPercent(out
); out
<< endl
;
341 out
<< "Request vs. RubySystem State Profile" << endl
;
342 out
<< "--------------------------------" << endl
;
345 Vector
<string
> requestProfileKeys
= m_requestProfileMap_ptr
->keys();
346 requestProfileKeys
.sortVector();
348 for(int i
=0; i
<requestProfileKeys
.size(); i
++) {
349 int temp_int
= m_requestProfileMap_ptr
->lookup(requestProfileKeys
[i
]);
350 double percent
= (100.0*double(temp_int
))/double(m_requests
);
351 while (requestProfileKeys
[i
] != "") {
352 out
<< setw(10) << string_split(requestProfileKeys
[i
], ':');
354 out
<< setw(11) << temp_int
;
355 out
<< setw(14) << percent
<< endl
;
359 out
<< "filter_action: " << m_filter_action_histogram
<< endl
;
361 if (!m_all_instructions
) {
362 m_address_profiler_ptr
->printStats(out
);
365 if (m_all_instructions
) {
366 m_inst_profiler_ptr
->printStats(out
);
370 out
<< "Message Delayed Cycles" << endl
;
371 out
<< "----------------------" << endl
;
372 out
<< "Total_delay_cycles: " << m_delayedCyclesHistogram
<< endl
;
373 out
<< "Total_nonPF_delay_cycles: " << m_delayedCyclesNonPFHistogram
<< endl
;
374 for (int i
= 0; i
< m_delayedCyclesVCHistograms
.size(); i
++) {
375 out
<< " virtual_network_" << i
<< "_delay_cycles: " << m_delayedCyclesVCHistograms
[i
] << endl
;
378 printResourceUsage(out
);
383 void Profiler::printResourceUsage(ostream
& out
) const
386 out
<< "Resource Usage" << endl
;
387 out
<< "--------------" << endl
;
389 integer_t pagesize
= getpagesize(); // page size in bytes
390 out
<< "page_size: " << pagesize
<< endl
;
393 getrusage (RUSAGE_SELF
, &usage
);
395 out
<< "user_time: " << usage
.ru_utime
.tv_sec
<< endl
;
396 out
<< "system_time: " << usage
.ru_stime
.tv_sec
<< endl
;
397 out
<< "page_reclaims: " << usage
.ru_minflt
<< endl
;
398 out
<< "page_faults: " << usage
.ru_majflt
<< endl
;
399 out
<< "swaps: " << usage
.ru_nswap
<< endl
;
400 out
<< "block_inputs: " << usage
.ru_inblock
<< endl
;
401 out
<< "block_outputs: " << usage
.ru_oublock
<< endl
;
404 void Profiler::clearStats()
406 m_ruby_start
= g_eventQueue_ptr
->getTime();
408 m_cycles_executed_at_start
.setSize(m_num_of_sequencers
);
409 for (int i
=0; i
< m_num_of_sequencers
; i
++) {
410 if (g_system_ptr
== NULL
) {
411 m_cycles_executed_at_start
[i
] = 0;
413 m_cycles_executed_at_start
[i
] = g_system_ptr
->getCycleCount(i
);
417 m_busyControllerCount
.setSize(MachineType_NUM
); // all machines
418 for(int i
=0; i
< MachineType_NUM
; i
++) {
419 m_busyControllerCount
[i
].setSize(MachineType_base_count((MachineType
)i
));
420 for(int j
=0; j
< MachineType_base_count((MachineType
)i
); j
++) {
421 m_busyControllerCount
[i
][j
] = 0;
426 m_delayedCyclesHistogram
.clear();
427 m_delayedCyclesNonPFHistogram
.clear();
428 m_delayedCyclesVCHistograms
.setSize(RubySystem::getNetwork()->getNumberOfVirtualNetworks());
429 for (int i
= 0; i
< RubySystem::getNetwork()->getNumberOfVirtualNetworks(); i
++) {
430 m_delayedCyclesVCHistograms
[i
].clear();
433 m_missLatencyHistograms
.setSize(RubyRequestType_NUM
);
434 for(int i
=0; i
<m_missLatencyHistograms
.size(); i
++) {
435 m_missLatencyHistograms
[i
].clear(200);
437 m_machLatencyHistograms
.setSize(GenericMachineType_NUM
+1);
438 for(int i
=0; i
<m_machLatencyHistograms
.size(); i
++) {
439 m_machLatencyHistograms
[i
].clear(200);
441 m_allMissLatencyHistogram
.clear(200);
443 m_SWPrefetchLatencyHistograms
.setSize(CacheRequestType_NUM
);
444 for(int i
=0; i
<m_SWPrefetchLatencyHistograms
.size(); i
++) {
445 m_SWPrefetchLatencyHistograms
[i
].clear(200);
447 m_SWPrefetchMachLatencyHistograms
.setSize(GenericMachineType_NUM
+1);
448 for(int i
=0; i
<m_SWPrefetchMachLatencyHistograms
.size(); i
++) {
449 m_SWPrefetchMachLatencyHistograms
[i
].clear(200);
451 m_allSWPrefetchLatencyHistogram
.clear(200);
453 m_sequencer_requests
.clear();
454 m_read_sharing_histogram
.clear();
455 m_write_sharing_histogram
.clear();
456 m_all_sharing_histogram
.clear();
457 m_cache_to_cache
= 0;
458 m_memory_to_cache
= 0;
461 m_requestProfileMap_ptr
->clear();
463 // count requests profiled
466 m_outstanding_requests
.clear();
467 m_outstanding_persistent_requests
.clear();
469 // Flush the prefetches through the system - used so that there are no outstanding requests after stats are cleared
470 //g_eventQueue_ptr->triggerAllEvents();
472 // update the start time
473 m_ruby_start
= g_eventQueue_ptr
->getTime();
476 void Profiler::addAddressTraceSample(const CacheMsg
& msg
, NodeID id
)
478 if (msg
.getType() != CacheRequestType_IFETCH
) {
480 // Note: The following line should be commented out if you want to
481 // use the special profiling that is part of the GS320 protocol
483 // NOTE: Unless PROFILE_HOT_LINES is enabled, nothing will be profiled by the AddressProfiler
484 m_address_profiler_ptr
->addTraceSample(msg
.getLineAddress(), msg
.getProgramCounter(), msg
.getType(), msg
.getAccessMode(), id
, false);
488 void Profiler::profileSharing(const Address
& addr
, AccessType type
, NodeID requestor
, const Set
& sharers
, const Set
& owner
)
490 Set
set_contacted(owner
);
491 if (type
== AccessType_Write
) {
492 set_contacted
.addSet(sharers
);
494 set_contacted
.remove(requestor
);
495 int number_contacted
= set_contacted
.count();
497 if (type
== AccessType_Write
) {
498 m_write_sharing_histogram
.add(number_contacted
);
500 m_read_sharing_histogram
.add(number_contacted
);
502 m_all_sharing_histogram
.add(number_contacted
);
504 if (number_contacted
== 0) {
512 void Profiler::profileMsgDelay(int virtualNetwork
, int delayCycles
) {
513 assert(virtualNetwork
< m_delayedCyclesVCHistograms
.size());
514 m_delayedCyclesHistogram
.add(delayCycles
);
515 m_delayedCyclesVCHistograms
[virtualNetwork
].add(delayCycles
);
516 if (virtualNetwork
!= 0) {
517 m_delayedCyclesNonPFHistogram
.add(delayCycles
);
521 // profiles original cache requests including PUTs
522 void Profiler::profileRequest(const string
& requestStr
)
526 if (m_requestProfileMap_ptr
->exist(requestStr
)) {
527 (m_requestProfileMap_ptr
->lookup(requestStr
))++;
529 m_requestProfileMap_ptr
->add(requestStr
, 1);
533 void Profiler::controllerBusy(MachineID machID
)
535 m_busyControllerCount
[(int)machID
.type
][(int)machID
.num
]++;
538 void Profiler::profilePFWait(Time waitTime
)
540 m_prefetchWaitHistogram
.add(waitTime
);
543 void Profiler::bankBusy()
548 // non-zero cycle demand request
549 void Profiler::missLatency(Time t
, RubyRequestType type
)
551 m_allMissLatencyHistogram
.add(t
);
552 m_missLatencyHistograms
[type
].add(t
);
555 // non-zero cycle prefetch request
556 void Profiler::swPrefetchLatency(Time t
, CacheRequestType type
, GenericMachineType respondingMach
)
558 m_allSWPrefetchLatencyHistogram
.add(t
);
559 m_SWPrefetchLatencyHistograms
[type
].add(t
);
560 m_SWPrefetchMachLatencyHistograms
[respondingMach
].add(t
);
561 if(respondingMach
== GenericMachineType_Directory
|| respondingMach
== GenericMachineType_NUM
) {
562 m_SWPrefetchL2MissLatencyHistogram
.add(t
);
566 void Profiler::profileTransition(const string
& component
, NodeID version
, Address addr
,
567 const string
& state
, const string
& event
,
568 const string
& next_state
, const string
& note
)
570 const int EVENT_SPACES
= 20;
571 const int ID_SPACES
= 3;
572 const int TIME_SPACES
= 7;
573 const int COMP_SPACES
= 10;
574 const int STATE_SPACES
= 6;
576 if ((g_debug_ptr
->getDebugTime() > 0) &&
577 (g_eventQueue_ptr
->getTime() >= g_debug_ptr
->getDebugTime())) {
578 (* debug_cout_ptr
).flags(ios::right
);
579 (* debug_cout_ptr
) << setw(TIME_SPACES
) << g_eventQueue_ptr
->getTime() << " ";
580 (* debug_cout_ptr
) << setw(ID_SPACES
) << version
<< " ";
581 (* debug_cout_ptr
) << setw(COMP_SPACES
) << component
;
582 (* debug_cout_ptr
) << setw(EVENT_SPACES
) << event
<< " ";
584 (* debug_cout_ptr
).flags(ios::right
);
585 (* debug_cout_ptr
) << setw(STATE_SPACES
) << state
;
586 (* debug_cout_ptr
) << ">";
587 (* debug_cout_ptr
).flags(ios::left
);
588 (* debug_cout_ptr
) << setw(STATE_SPACES
) << next_state
;
590 (* debug_cout_ptr
) << " " << addr
<< " " << note
;
592 (* debug_cout_ptr
) << endl
;
// Total virtual size of this process in megabytes, taken from the first
// field of /proc/self/statm (reported by the kernel in pages).  Returns
// 0 when the file cannot be read (the locals stay zero-initialized).
static double process_memory_total()
{
    // 4kB page size, 1024*1024 bytes per MB
    const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0);
    std::ifstream proc_file;
    proc_file.open("/proc/self/statm");
    int total_size_in_pages = 0;
    int res_size_in_pages = 0;
    proc_file >> total_size_in_pages;
    proc_file >> res_size_in_pages;
    return double(total_size_in_pages) * MULTIPLIER; // size in megabytes
}
// Resident set size of this process in megabytes, taken from the second
// field of /proc/self/statm (reported by the kernel in pages).  Returns
// 0 when the file cannot be read (the locals stay zero-initialized).
static double process_memory_resident()
{
    // 4kB page size, 1024*1024 bytes per MB
    const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0);
    std::ifstream proc_file;
    proc_file.open("/proc/self/statm");
    int total_size_in_pages = 0;
    int res_size_in_pages = 0;
    proc_file >> total_size_in_pages;
    proc_file >> res_size_in_pages;
    return double(res_size_in_pages) * MULTIPLIER; // size in megabytes
}
621 void Profiler::rubyWatch(int id
){
623 Address watch_address
= Address(tr
);
624 const int ID_SPACES
= 3;
625 const int TIME_SPACES
= 7;
627 (* debug_cout_ptr
).flags(ios::right
);
628 (* debug_cout_ptr
) << setw(TIME_SPACES
) << g_eventQueue_ptr
->getTime() << " ";
629 (* debug_cout_ptr
) << setw(ID_SPACES
) << id
<< " "
634 if(!m_watch_address_list_ptr
->exist(watch_address
)){
635 m_watch_address_list_ptr
->add(watch_address
, 1);
639 bool Profiler::watchAddress(Address addr
){
640 if (m_watch_address_list_ptr
->exist(addr
))
647 RubyProfilerParams::create()
649 return new Profiler(this);