/* frv simulator machine independent profiling code.

   Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat

   This file is part of the GNU simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "bfd.h"

#if WITH_PROFILE_MODEL_P

#include "profile.h"
#include "profile-fr400.h"
#include "profile-fr500.h"
#include "profile-fr550.h"

static void
reset_gr_flags (SIM_CPU *cpu, INT gr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
    fr400_reset_gr_flags (cpu, gr);
  /* Other machines have no gr flags right now.  */
}

static void
reset_fr_flags (SIM_CPU *cpu, INT fr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
    fr400_reset_fr_flags (cpu, fr);
  else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_fr_flags (cpu, fr);
}

static void
reset_acc_flags (SIM_CPU *cpu, INT acc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
    fr400_reset_acc_flags (cpu, acc);
  /* Other machines have no acc flags right now.  */
}

static void
reset_cc_flags (SIM_CPU *cpu, INT cc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_cc_flags (cpu, cc);
  /* Other machines have no cc flags.  */
}

void
set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      reset_gr_flags (cpu, gr);
      ps->cur_gr_complex |= (((DI) 1) << gr);
    }
}

void
set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      ps->cur_gr_complex &= ~(((DI) 1) << gr);
    }
}

int
use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* Normalize to 0/1 before the DI result is narrowed to int;
	 otherwise the test bits for gr32-gr63 would be truncated away.  */
      return (ps->cur_gr_complex & (((DI) 1) << gr)) != 0;
    }
  return 0;
}

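/* For example, marking gr40 as holding the result of a "complex" insn and
   querying it later looks like this:

     set_use_is_gr_complex (cpu, 40);       sets bit 40 of cur_gr_complex
     use_is_gr_complex (cpu, 40);           nonzero while the mark is set
     set_use_not_gr_complex (cpu, 40);      clears bit 40 again

   The 64-bit DI mask is what lets a single word track all 64 GRs.  */
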
/* Global flag indicating whether the current insn is being modeled.  */
enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;

/* Static buffer for the name of the currently most restrictive hazard.  */
static char hazard_name[100] = "";

/* Insn fetch buffers, one per pipeline.  */
FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[]
= {
  {1, NO_REQNO}, {1, NO_REQNO} /* Initialized with an impossible address.  */
};

enum cache_request
{
  cache_load,
  cache_invalidate,
  cache_flush,
  cache_preload,
  cache_unlock
};

/* A queue of load requests from the data cache.  Used to keep track of loads
   which are still pending.  */
/* TODO -- some of these are mutually exclusive and can use a union.  */
typedef struct
{
  FRV_CACHE *cache;
  unsigned reqno;
  SI address;
  int length;
  int is_signed;
  int regnum;
  int cycles;
  int regtype;
  int lock;
  int all;
  int slot;
  int active;
  enum cache_request request;
} CACHE_QUEUE_ELEMENT;

#define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
struct
{
  unsigned reqno;
  int ix;
  CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
} cache_queue = {0, 0};

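/* Life cycle of a queue element, as driven by the functions below:

     request_cache_*             enqueue the element inactive, with q->cycles
                                 holding the delay before submission
     activate_cache_requests     mark it active at the end of the VLIW insn,
                                 submitting immediately if q->cycles == 0
     run_caches                  count q->cycles down once per cycle and call
                                 submit_cache_request when it reaches zero
     request_complete            poll the cache for the result
     remove_cache_queue_element  dequeue once the request has completed  */
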
/* Queue a request for a load from the cache.  The load will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the load is activated.  */
void
request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
     zero.  */
  if (CPU_LOAD_LENGTH (cpu) == 0)
    return;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic.  */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_load;
  q->cache = CPU_DATA_CACHE (cpu);
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->is_signed = CPU_LOAD_SIGNED (cpu);
  q->regnum = regnum;
  q->regtype = regtype;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to flush the cache.  The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic.  */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_flush;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to invalidate the cache.  The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic.  */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_invalidate;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to preload the cache.  The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic.  */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_preload;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->lock = CPU_LOAD_LOCK (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to unlock the cache.  The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic.  */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_unlock;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

static void
submit_cache_request (CACHE_QUEUE_ELEMENT *q)
{
  switch (q->request)
    {
    case cache_load:
      frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
      break;
    case cache_flush:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
				    q->all, 1/*flush*/);
      break;
    case cache_invalidate:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
				    q->all, 0/*flush*/);
      break;
    case cache_preload:
      frv_cache_request_preload (q->cache, q->address, q->slot,
				 q->length, q->lock);
      break;
    case cache_unlock:
      frv_cache_request_unlock (q->cache, q->address, q->slot);
      break;
    default:
      abort ();
    }
}

/* Activate all inactive load requests.  */
static void
activate_cache_requests (SIM_CPU *cpu)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
      if (! q->active)
	{
	  q->active = 1;
	  /* Submit the request now if the cycle count is zero.  */
	  if (q->cycles == 0)
	    submit_cache_request (q);
	}
    }
}

/* Check to see if a load is pending which affects the given register(s).  */
int
load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of register.  */
      if (! q->active || q->request != cache_load || q->regtype != regtype)
	continue;

      /* If the register numbers are equal, then we have a match.  */
      if (q->regnum == regnum)
	return 1; /* load pending */

      /* Check for overlap of a load with a multi-word register.  */
      if (regnum < q->regnum)
	{
	  if (regnum + words > q->regnum)
	    return 1;
	}
      /* Check for overlap of a multi-word load with the register.  */
      else
	{
	  int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
	  if (q->regnum + data_words > regnum)
	    return 1;
	}
    }

  return 0; /* no load pending */
}

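/* For example, a pending 16-byte load into gr8 covers data_words == 4
   registers, gr8 through gr11, so a query for gr10 (words == 1) reports a
   pending load.  Likewise a query for a double word in gr7/gr8 (regnum 7,
   words == 2) overlaps the load's first register and also reports it.  */
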
/* Check to see if a cache flush is pending which affects the given
   address.  */
static int
flush_pending_for_address (SIM_CPU *cpu, SI address)
{
  int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of request and active.  */
      if (! q->active || q->request != cache_flush)
	continue;

      /* If the addresses are equal, then we have a match.  */
      if ((q->address & line_mask) == (address & line_mask))
	return 1; /* flush pending */
    }

  return 0; /* no flush pending */
}

static void
remove_cache_queue_element (SIM_CPU *cpu, int i)
{
  /* Remember the request being removed, in case it is the load of an FR
     register.  */
  CACHE_QUEUE_ELEMENT q = cache_queue.q[i];

  for (--cache_queue.ix; i < cache_queue.ix; ++i)
    cache_queue.q[i] = cache_queue.q[i + 1];

  /* If we removed a load of an FR register, check to see if any other loads
     of that register are still queued.  If not, then apply the queued post
     processing time of that register to its latency.  Also apply
     1 extra cycle of latency to the register since it was a floating point
     load.  */
  if (q.request == cache_load && q.regtype != REGTYPE_NONE)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
      int j;
      for (j = 0; j < data_words; ++j)
	{
	  int regnum = q.regnum + j;
	  if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
	    {
	      if (q.regtype == REGTYPE_FR)
		{
		  int *fr = ps->fr_busy;
		  fr[regnum] += 1 + ps->fr_ptime[regnum];
		  ps->fr_ptime[regnum] = 0;
		}
	    }
	}
    }
}

/* Copy data from the cache buffer to the target register(s).  */
static void
copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
		CACHE_QUEUE_ELEMENT *q)
{
  switch (q->length)
    {
    case 1:
      if (q->regtype == REGTYPE_FR)
	{
	  if (q->is_signed)
	    {
	      QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
	      SET_H_FR (q->regnum, value);
	    }
	  else
	    {
	      UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
	      SET_H_FR (q->regnum, value);
	    }
	}
      else
	{
	  if (q->is_signed)
	    {
	      QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
	      SET_H_GR (q->regnum, value);
	    }
	  else
	    {
	      UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
	      SET_H_GR (q->regnum, value);
	    }
	}
      break;
    case 2:
      if (q->regtype == REGTYPE_FR)
	{
	  if (q->is_signed)
	    {
	      HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
	      SET_H_FR (q->regnum, value);
	    }
	  else
	    {
	      UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
	      SET_H_FR (q->regnum, value);
	    }
	}
      else
	{
	  if (q->is_signed)
	    {
	      HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
	      SET_H_GR (q->regnum, value);
	    }
	  else
	    {
	      UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
	      SET_H_GR (q->regnum, value);
	    }
	}
      break;
    case 4:
      if (q->regtype == REGTYPE_FR)
	{
	  SET_H_FR (q->regnum,
		    CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
	}
      else
	{
	  SET_H_GR (q->regnum,
		    CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
	}
      break;
    case 8:
      if (q->regtype == REGTYPE_FR)
	{
	  SET_H_FR_DOUBLE (q->regnum,
			   CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
	}
      else
	{
	  SET_H_GR_DOUBLE (q->regnum,
			   CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
	}
      break;
    case 16:
      if (q->regtype == REGTYPE_FR)
	frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
				     CACHE_RETURN_DATA_ADDRESS (cache, slot,
								q->address,
								16));
      else
	frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
				     CACHE_RETURN_DATA_ADDRESS (cache, slot,
								q->address,
								16));
      break;
    default:
      abort ();
    }
}

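/* For example, a signed 2-byte load into a GR returns its data through the
   signed HI intermediate above, so SET_H_GR stores a sign-extended value,
   while the unsigned variant goes through UHI and zero-extends.  16-byte
   (quad) loads pass a pointer into the cache buffer to the quad set
   handlers instead of copying through a local.  */
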
static int
request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
{
  FRV_CACHE* cache;
  if (! q->active || q->cycles > 0)
    return 0;

  cache = CPU_DATA_CACHE (cpu);
  switch (q->request)
    {
    case cache_load:
      /* For loads, we must wait until the data is returned from the cache.  */
      if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
	{
	  copy_load_data (cpu, cache, 0, q);
	  return 1;
	}
      if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
	{
	  copy_load_data (cpu, cache, 1, q);
	  return 1;
	}
      break;

    case cache_flush:
      /* We must wait until the data is flushed.  */
      if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
	return 1;
      if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
	return 1;
      break;

    default:
      /* All other requests are complete once they've been made.  */
      return 1;
    }

  return 0;
}

/* Run the insn and data caches through the given number of cycles, taking
   note of load requests which are fulfilled as a result.  */
static void
run_caches (SIM_CPU *cpu, int cycles)
{
  FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
  FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
  int i;
  /* For each cycle, run the caches, noting which requests have been fulfilled
     and submitting new requests on their designated cycles.  */
  for (i = 0; i < cycles; ++i)
    {
      int j;
      /* Run the caches through 1 cycle.  */
      frv_cache_run (data_cache, 1);
      frv_cache_run (insn_cache, 1);

      /* Note whether prefetched insn data has been loaded yet.  */
      for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
	{
	  if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
	      && frv_cache_data_in_buffer (insn_cache, j,
					   frv_insn_fetch_buffer[j].address,
					   frv_insn_fetch_buffer[j].reqno))
	    frv_insn_fetch_buffer[j].reqno = NO_REQNO;
	}

      /* Check to see which requests have been satisfied and which should
	 be submitted now.  */
      for (j = 0; j < cache_queue.ix; ++j)
	{
	  CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
	  if (! q->active)
	    continue;

	  /* If a load has been satisfied, complete the operation and remove it
	     from the queue.  */
	  if (request_complete (cpu, q))
	    {
	      remove_cache_queue_element (cpu, j);
	      --j;
	      continue;
	    }

	  /* Decrease the cycle count of each queued request.
	     Submit a request for each queued request whose cycle count has
	     become zero.  */
	  --q->cycles;
	  if (q->cycles == 0)
	    submit_cache_request (q);
	}
    }
}

static void
apply_latency_adjustments (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the latencies of the registers.  */
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  for (i = 0; i < 64; ++i)
    {
      if (ps->fr_busy_adjust[i] > 0)
	*fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative.  */
      if (ps->acc_busy_adjust[i] > 0)
	*acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative.  */
      ++fr;
      ++acc;
    }
}

/* Account for the number of cycles which have just passed in the latency of
   various system elements.  Works for negative cycles too so that latency
   can be extended in the case of insn fetch latency.
   If negative or zero, then no adjustment is necessary.  */
static void
update_latencies (SIM_CPU *cpu, int cycles)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the latencies of the registers.  */
  int *fdiv;
  int *fsqrt;
  int *idiv;
  int *flt;
  int *media;
  int *ccr;
  int *gr = ps->gr_busy;
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr <= cycles)
	{
	  *gr = 0;
	  reset_gr_flags (cpu, i);
	}
      else
	*gr -= cycles;
      /* If the busy count drops to 0, then mark the register as
	 "not in use".  */
      if (*fr <= cycles)
	{
	  int *fr_lat = ps->fr_latency + i;
	  *fr = 0;
	  ps->fr_busy_adjust[i] = 0;
	  /* Only clear flags if this register has no target latency.  */
	  if (*fr_lat == 0)
	    reset_fr_flags (cpu, i);
	}
      else
	*fr -= cycles;
      /* If the busy count drops to 0, then mark the register as
	 "not in use".  */
      if (*acc <= cycles)
	{
	  int *acc_lat = ps->acc_latency + i;
	  *acc = 0;
	  ps->acc_busy_adjust[i] = 0;
	  /* Only clear flags if this register has no target latency.  */
	  if (*acc_lat == 0)
	    reset_acc_flags (cpu, i);
	}
      else
	*acc -= cycles;
      ++gr;
      ++fr;
      ++acc;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr <= cycles)
	{
	  *ccr = 0;
	  reset_cc_flags (cpu, i);
	}
      else
	*ccr -= cycles;
      ++ccr;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr <= cycles)
	*spr = 0;
      else
	*spr -= cycles;
      ++spr;
    }
  /* This loop handles resources.  */
  idiv = ps->idiv_busy;
  fdiv = ps->fdiv_busy;
  fsqrt = ps->fsqrt_busy;
  for (i = 0; i < 2; ++i)
    {
      *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
      *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
      *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
      ++idiv;
      ++fdiv;
      ++fsqrt;
    }
  /* Float and media units can occur in 4 slots on some machines.  */
  flt = ps->float_busy;
  media = ps->media_busy;
  for (i = 0; i < 4; ++i)
    {
      *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
      *media = (*media <= cycles) ? 0 : (*media - cycles);
      ++flt;
      ++media;
    }
}

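/* For example, with cycles == 3 a GR whose busy count is 2 expires (drops to
   0 and has its flags reset), while one at 5 simply drops to 2.  Because the
   comparisons use <=, this countdown never drives a count negative; negative
   values arise only from the decrease_*_busy adjustments applied
   elsewhere.  */
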
/* Print information about the wait for the given number of cycles.  */
void
frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
{
  if (TRACE_INSN_P (cpu) && cycles > 0)
    {
      SIM_DESC sd = CPU_STATE (cpu);
      trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
		    hazard_name, cycles);
    }
}

void
trace_vliw_wait_cycles (SIM_CPU *cpu)
{
  if (TRACE_INSN_P (cpu))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
    }
}

/* Wait for the given number of cycles.  */
void
frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  update_latencies (cpu, cycles);
  run_caches (cpu, cycles);
  PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
}

void
handle_resource_wait (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  if (ps->vliw_wait != 0)
    frv_model_advance_cycles (cpu, ps->vliw_wait);
  if (ps->vliw_load_stall > ps->vliw_wait)
    ps->vliw_load_stall -= ps->vliw_wait;
  else
    ps->vliw_load_stall = 0;
}

/* Account for the number of cycles until these resources will be available
   again.  */
static void
update_target_latencies (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the latencies of the registers.  */
  int *ccr_lat;
  int *gr_lat = ps->gr_latency;
  int *fr_lat = ps->fr_latency;
  int *acc_lat = ps->acc_latency;
  int *spr_lat;
  int *ccr;
  int *gr = ps->gr_busy;
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr_lat)
	{
	  *gr = *gr_lat;
	  *gr_lat = 0;
	}
      if (*fr_lat)
	{
	  *fr = *fr_lat;
	  *fr_lat = 0;
	}
      if (*acc_lat)
	{
	  *acc = *acc_lat;
	  *acc_lat = 0;
	}
      ++gr; ++gr_lat;
      ++fr; ++fr_lat;
      ++acc; ++acc_lat;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  ccr_lat = ps->ccr_latency;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr_lat)
	{
	  *ccr = *ccr_lat;
	  *ccr_lat = 0;
	}
      ++ccr; ++ccr_lat;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  spr_lat = ps->spr_latency;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr_lat)
	{
	  *spr = *spr_lat;
	  *spr_lat = 0;
	}
      ++spr; ++spr_lat;
    }
}

/* Run the caches until all pending cache flushes are complete.  */
static void
wait_for_flush (SIM_CPU *cpu)
{
  SI address = CPU_LOAD_ADDRESS (cpu);
  int wait = 0;
  while (flush_pending_for_address (cpu, address))
    {
      frv_model_advance_cycles (cpu, 1);
      ++wait;
    }
  if (TRACE_INSN_P (cpu) && wait)
    {
      /* Print the address as a hex value; %p would wrongly treat the SI
	 as a host pointer.  */
      sprintf (hazard_name, "Data cache flush address 0x%x:", address);
      frv_model_trace_wait_cycles (cpu, wait, hazard_name);
    }
}

/* Initialize cycle counting for an insn.
   FIRST_P is non-zero if this is the first insn in a set of parallel
   insns.  */
void
frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
{
  SIM_DESC sd = CPU_STATE (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);

  ps->vliw_wait = 0;
  ps->post_wait = 0;
  memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
  memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));

  if (first_p)
    {
      ps->vliw_insns++;
      ps->vliw_cycles = 0;
      ps->vliw_branch_taken = 0;
      ps->vliw_load_stall = 0;
    }

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      fr400_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_before (cpu, first_p);
      break;
    default:
      break;
    }

  if (first_p)
    wait_for_flush (cpu);
}

/* Record the cycles computed for an insn.
   LAST_P is non-zero if this is the last insn in a set of parallel insns,
   and we update the total cycle count.
   CYCLES is the cycle count of the insn.  */

void
frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  SIM_DESC sd = CPU_STATE (cpu);

  PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;

  /* The number of cycles for a VLIW insn is the maximum number of cycles
     used by any individual insn within it.  */
  if (cycles > ps->vliw_cycles)
    ps->vliw_cycles = cycles;

  if (last_p)
    {
      /* This is the last insn in a VLIW insn.  */
      struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;

      activate_cache_requests (cpu); /* before advancing cycles.  */
      apply_latency_adjustments (cpu); /* must go first.  */
      update_target_latencies (cpu); /* must go next.  */
      frv_model_advance_cycles (cpu, ps->vliw_cycles);

      PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;

      /* Check the interrupt timer.  cycles contains the total cycle count.  */
      if (timer->enabled)
	{
	  cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
	  if (timer->current % timer->value
	      + (cycles - timer->current) >= timer->value)
	    frv_queue_external_interrupt (cpu, timer->interrupt);
	  timer->current = cycles;
	}

      ps->past_first_p = 0; /* Next one will be the first in a new VLIW.  */
      ps->branch_address = -1;
    }
  else
    ps->past_first_p = 1;

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      fr400_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_after (cpu, last_p, cycles);
      break;
    default:
      break;
    }
}

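/* A worked example of the timer check above: with timer->value == 100,
   timer->current == 250 and a new total cycle count of 330, the VLIW insn
   began 50 cycles into the current period (250 % 100) and another 80 cycles
   have since elapsed, so 50 + 80 >= 100 and the periodic interrupt is
   queued.  */
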
USI
frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
{
  /* Record the hint and branch address for use in profiling.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  ps->branch_hint = hint;
  ps->branch_address = target;
  /* No meaningful value to return; return 0 so that callers which read the
     result get a well-defined value.  */
  return 0;
}

/* Top up the latency of the given GR by the given number of cycles.  */
void
update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
	gr[out_GR] = cycles;
    }
}

void
decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
{
  if (in_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_busy;
      gr[in_GR] -= cycles;
    }
}

/* Top up the latency of the given double GR by the number of cycles.  */
void
update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
	gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
	gr[out_GR + 1] = cycles;
    }
}
1055
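/* For example, update_GRdouble_latency (cpu, 4, 3) marks both gr4 and gr5 as
   unavailable for 3 cycles; the out_GR < 63 guard keeps a write to the last
   register from indexing past the end of the latency array.  */
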
void
update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
	 by the insn.  */
      if (gr[out_GR] < cycles)
	gr[out_GR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
	 data from the cache or memory.  Assume that the load is issued
	 after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
	 by the insn.  */
      if (gr[out_GR] < cycles)
	gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
	gr[out_GR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
	 data from the cache or memory.  Assume that the load is issued
	 after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
{
  update_GR_latency_for_load (cpu, out_GR, cycles);
}

/* Top up the latency of the given FR by the given number of cycles.  */
void
update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
	fr[out_FR] = cycles;
    }
}

/* Top up the latency of the given double FR by the number of cycles.  */
void
update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
	fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
	fr[out_FR + 1] = cycles;
    }
}

void
update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
	 by the insn.  */
      if (fr[out_FR] < cycles)
	fr[out_FR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
	 data from the cache or memory.  Assume that the load is issued
	 after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

void
update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
	 by the insn.  */
      if (fr[out_FR] < cycles)
	fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
	fr[out_FR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
	 data from the cache or memory.  Assume that the load is issued
	 after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

/* Top up the post-processing time of the given FR by the given number of
   cycles.  */
void
update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
	 the post processing time for this register.  Otherwise apply it
	 directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
	{
	  int *fr = ps->fr_latency;
	  fr[out_FR] += cycles;
	}
      else
	ps->fr_ptime[out_FR] += cycles;
    }
}

void
update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
	 the post processing time for this register.  Otherwise apply it
	 directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
	{
	  int *fr = ps->fr_latency;
	  fr[out_FR] += cycles;
	  if (out_FR < 63)
	    fr[out_FR + 1] += cycles;
	}
      else
	{
	  ps->fr_ptime[out_FR] += cycles;
	  if (out_FR < 63)
	    ps->fr_ptime[out_FR + 1] += cycles;
	}
    }
}

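/* For example, if fr10 still has a load in flight, update_FR_ptime (cpu, 10,
   2) banks the 2 cycles in fr_ptime[10]; remove_cache_queue_element later
   folds them, plus 1 extra cycle for the floating point load, into
   fr_busy[10].  With no load pending, the 2 cycles go straight onto
   fr_latency[10].  */
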
/* Top up the post-processing time of the given ACC by the given number of
   cycles.  */
void
update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
	 directly to the latency of the register.  */
      int *acc = ps->acc_latency;
      acc[out_ACC] += cycles;
    }
}

/* Top up the post-processing time of the given SPR by the given number of
   cycles.  */
void
update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
	 directly to the latency of the register.  */
      int *spr = ps->spr_latency;
      spr[out_SPR] += cycles;
    }
}

void
decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] -= cycles;
      if (ps->acc_busy_adjust[out_ACC] >= 0
	  && cycles > ps->acc_busy_adjust[out_ACC])
	ps->acc_busy_adjust[out_ACC] = cycles;
    }
}

void
increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] += cycles;
    }
}

void
enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->acc_busy_adjust[in_ACC] = -1;
}

void
decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] -= cycles;
      if (ps->fr_busy_adjust[out_FR] >= 0
	  && cycles > ps->fr_busy_adjust[out_FR])
	ps->fr_busy_adjust[out_FR] = cycles;
    }
}

void
increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] += cycles;
    }
}

/* Top up the latency of the given ACC by the given number of cycles.  */
void
update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_latency;
      if (acc[out_ACC] < cycles)
	acc[out_ACC] = cycles;
    }
}

/* Top up the latency of the given CCR by the given number of cycles.  */
void
update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
{
  if (out_CCR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *ccr = ps->ccr_latency;
      if (ccr[out_CCR] < cycles)
	ccr[out_CCR] = cycles;
    }
}

/* Top up the latency of the given SPR by the given number of cycles.  */
void
update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *spr = ps->spr_latency;
      if (spr[out_SPR] < cycles)
	spr[out_SPR] = cycles;
    }
}

/* Set the latency of the given integer division resource to the given
   number of cycles.  */
void
update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_float_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_media_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  r[in_resource] = cycles;
}

/* Set the branch penalty to the given number of cycles.  */
void
update_branch_penalty (SIM_CPU *cpu, int cycles)
{
  /* Operate directly on the busy cycles since only one branch can occur
     in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->branch_penalty = cycles;
}

/* Check the availability of the given GR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
      ps->vliw_wait = gr[in_GR];
    }
}

/* Check the availability of the given GR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0)
    {
      if (gr[in_GR] > ps->vliw_wait)
	{
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
	  ps->vliw_wait = gr[in_GR];
	}
      if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
	{
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
	  ps->vliw_wait = gr[in_GR + 1];
	}
    }
}

/* Check the availability of the given FR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
      ps->vliw_wait = fr[in_FR];
    }
}

/* Check the availability of the given FR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->vliw_wait)
	{
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
	  ps->vliw_wait = fr[in_FR];
	}
      if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
	{
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
	  ps->vliw_wait = fr[in_FR + 1];
	}
    }
}

/* Check the availability of the given CCR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	{
	  if (in_CCR > 3)
	    sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
	  else
	    sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
	}
      ps->vliw_wait = ccr[in_CCR];
    }
}

/* Check the availability of the given ACC register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
      ps->vliw_wait = acc[in_ACC];
    }
}

/* Check the availability of the given SPR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
      ps->vliw_wait = spr[in_SPR];
    }
}

/* Check the availability of the given integer division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for integer division in slot I%d:",
		   in_resource);
	}
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for floating point division in slot F%d:",
		   in_resource);
	}
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float square root resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for square root in slot F%d:",
		   in_resource);
	}
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_float_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for floating point unit in slot F%d:",
		   in_resource);
	}
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given media unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_media_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for media unit in slot M%d:",
		   in_resource);
	}
      ps->vliw_wait = r[in_resource];
    }
}

/* Run the caches until all requests for the given register(s) are
   satisfied.  */
void
load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
	{
	  frv_model_advance_cycles (cpu, 1);
	  ++wait;
	}
      if (wait)
	{
	  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
	  ps->vliw_wait += wait;
	  ps->vliw_load_stall += wait;
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
	}
    }
}

void
load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
	{
	  frv_model_advance_cycles (cpu, 1);
	  ++wait;
	}
      /* Post processing time may have been added to the register's
	 latency after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
	{
	  wait += fr[in_FR];
	  frv_model_advance_cycles (cpu, fr[in_FR]);
	}
      /* Update the vliw_wait with the number of cycles we waited for the
	 load and any post-processing.  */
      if (wait)
	{
	  ps->vliw_wait += wait;
	  ps->vliw_load_stall += wait;
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
	}
    }
}

void
load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
	{
	  frv_model_advance_cycles (cpu, 1);
	  ++wait;
	}
      if (wait)
	{
	  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
	  ps->vliw_wait += wait;
	  ps->vliw_load_stall += wait;
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
	}
    }
}

void
load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
	{
	  frv_model_advance_cycles (cpu, 1);
	  ++wait;
	}
      /* Post processing time may have been added to the registers'
	 latencies after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
	{
	  wait += fr[in_FR];
	  frv_model_advance_cycles (cpu, fr[in_FR]);
	}
      if (in_FR < 63)
	{
	  if (fr[in_FR + 1])
	    {
	      wait += fr[in_FR + 1];
	      frv_model_advance_cycles (cpu, fr[in_FR + 1]);
	    }
	}
      /* Update the vliw_wait with the number of cycles we waited for the
	 load and any post-processing.  */
      if (wait)
	{
	  ps->vliw_wait += wait;
	  ps->vliw_load_stall += wait;
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
	}
    }
}

void
enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->fr_busy_adjust[in_FR] = -1;
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
int
post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
    {
      ps->post_wait = fr[in_FR];
      if (TRACE_INSN_P (cpu))
	sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
    }
  /* The wait is recorded in ps->post_wait; the return value itself carries
     no information, but return 0 so callers which read it get a defined
     value.  */
  return 0;
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
int
post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->post_wait)
	{
	  ps->post_wait = fr[in_FR];
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
	}
      if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
	{
	  ps->post_wait = fr[in_FR + 1];
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
	}
    }
  return 0;
}

int
post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;

  if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
    {
      ps->post_wait = acc[in_ACC];
      if (TRACE_INSN_P (cpu))
	sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
    }
  return 0;
}

int
post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;

  if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
    {
      ps->post_wait = ccr[in_CCR];
      if (TRACE_INSN_P (cpu))
	{
	  if (in_CCR > 3)
	    sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
	  else
	    sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
	}
    }
  return 0;
}

int
post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;

  if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
    {
      ps->post_wait = spr[in_SPR];
      if (TRACE_INSN_P (cpu))
	sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
    }
  return 0;
}

int
post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fdiv = ps->fdiv_busy;

  /* Multiple floating point divisions in the same slot need only wait 1
     extra cycle.  */
  if (fdiv[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for floating point division in slot F%d:",
		   slot);
	}
    }
  return 0;
}

int
post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fsqrt = ps->fsqrt_busy;

  /* Multiple floating point square roots in the same slot need only wait 1
     extra cycle.  */
  if (fsqrt[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for square root in slot F%d:", slot);
	}
    }
  return 0;
}

int
post_wait_for_float (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *flt = ps->float_busy;

  /* Wait for whatever is still occupying the floating point unit in this
     slot.  */
  if (flt[slot] > ps->post_wait)
    {
      ps->post_wait = flt[slot];
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for floating point unit in slot F%d:",
		   slot);
	}
    }
  return 0;
}

int
post_wait_for_media (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *media = ps->media_busy;

  /* Wait for whatever is still occupying the media unit in this slot.  */
  if (media[slot] > ps->post_wait)
    {
      ps->post_wait = media[slot];
      if (TRACE_INSN_P (cpu))
	{
	  sprintf (hazard_name,
		   "Resource hazard for media unit in slot M%d:", slot);
	}
    }
  return 0;
}

/* Print cpu-specific profile information.  */
#define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))

static void
print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
{
  SIM_DESC sd = CPU_STATE (cpu);

  if (cache != NULL)
    {
      char comma_buf[20];
      unsigned accesses;

      sim_io_printf (sd, "  %s Cache\n\n", cache_name);
      accesses = cache->statistics.accesses;
      sim_io_printf (sd, "    Total accesses: %s\n", COMMAS (accesses));
      if (accesses != 0)
	{
	  float rate;
	  unsigned hits = cache->statistics.hits;
	  sim_io_printf (sd, "    Hits: %s\n", COMMAS (hits));
	  rate = (float) hits / accesses;
	  sim_io_printf (sd, "    Hit rate: %.2f%%\n", rate * 100);
	}
    }
  else
    sim_io_printf (sd, "  Model %s has no %s cache\n",
		   MODEL_NAME (CPU_MODEL (cpu)), cache_name);

  sim_io_printf (sd, "\n");
}

/* This table must correspond to the UNIT_ATTR table in
   opcodes/frv-desc.h.  Only the units up to UNIT_C need be
   listed since the others cannot occur after mapping.  */
static char *
slot_names[] =
{
  "none",
  "I0", "I1", "I01", "I2", "I3", "IALL",
  "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  "B0", "B1", "B01",
  "C"
};

static void
print_parallel (SIM_CPU *cpu, int verbose)
{
  SIM_DESC sd = CPU_STATE (cpu);
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  unsigned total, vliw;
  char comma_buf[20];
  float average;

  sim_io_printf (sd, "Model %s Parallelization\n\n",
		 MODEL_NAME (CPU_MODEL (cpu)));

  total = PROFILE_TOTAL_INSN_COUNT (p);
  sim_io_printf (sd, "  Total instructions: %s\n", COMMAS (total));
  vliw = ps->vliw_insns;
  sim_io_printf (sd, "  VLIW instructions: %s\n", COMMAS (vliw));
  average = (float) total / vliw;
  sim_io_printf (sd, "  Average VLIW length: %.2f\n", average);
  average = (float) PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
  sim_io_printf (sd, "  Cycles per VLIW instruction: %.2f\n", average);
  average = (float) total / PROFILE_MODEL_TOTAL_CYCLES (p);
  sim_io_printf (sd, "  Instructions per cycle: %.2f\n", average);

  if (verbose)
    {
      int i;
      int max_val = 0;
      int max_name_len = 0;
      for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
	{
	  if (INSNS_IN_SLOT (i))
	    {
	      int len;
	      if (INSNS_IN_SLOT (i) > max_val)
		max_val = INSNS_IN_SLOT (i);
	      len = strlen (slot_names[i]);
	      if (len > max_name_len)
		max_name_len = len;
	    }
	}
      if (max_val > 0)
	{
	  sim_io_printf (sd, "\n");
	  sim_io_printf (sd, "  Instructions per slot:\n");
	  sim_io_printf (sd, "\n");
	  for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
	    {
	      if (INSNS_IN_SLOT (i) != 0)
		{
		  sim_io_printf (sd, "  %*s: %*s: ",
				 max_name_len, slot_names[i],
				 max_val < 10000 ? 5 : 10,
				 COMMAS (INSNS_IN_SLOT (i)));
		  sim_profile_print_bar (sd, PROFILE_HISTOGRAM_WIDTH,
					 INSNS_IN_SLOT (i),
					 max_val);
		  sim_io_printf (sd, "\n");
		}
	    }
	} /* details to print */
    } /* verbose */

  sim_io_printf (sd, "\n");
}

void
frv_profile_info (SIM_CPU *cpu, int verbose)
{
  /* FIXME: Need to add smp support.  */
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);

#if WITH_PROFILE_PARALLEL_P
  if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
    print_parallel (cpu, verbose);
#endif

#if WITH_PROFILE_CACHE_P
  if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
    {
      SIM_DESC sd = CPU_STATE (cpu);
      sim_io_printf (sd, "Model %s Cache Statistics\n\n",
		     MODEL_NAME (CPU_MODEL (cpu)));
      print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
      print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
    }
#endif /* WITH_PROFILE_CACHE_P */
}

/* A hack to get registers referenced for profiling.  */
SI frv_ref_SI (SI ref) {return ref;}
#endif /* WITH_PROFILE_MODEL_P */