/* frv memory model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "bfd.h"

/* Check for alignment and access restrictions.  Return the corrected
   address.  */
static SI
fr400_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  /* Check access restrictions for double word loads only.  */
  if (align_mask == 7)
    {
      if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
        frv_queue_data_access_error_interrupt (current_cpu, address);
    }
  return address;
}

static SI
fr500_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_data_access_error_interrupt (current_cpu, address);

  return address;
}

static SI
fr550_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
      || (align_mask > 0x3
          && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
    frv_queue_data_access_error_interrupt (current_cpu, address);

  return address;
}

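/* Dispatch the data read address check to the handler for the machine
   being simulated.  */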
static SI
check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      address = fr400_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}

static SI
fr400_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      /* Make sure that this exception is not masked.  */
      USI isr = GET_ISR ();
      if (! GET_ISR_EMAM (isr))
        {
          /* Bad alignment causes a data_access_error on fr400.  */
          frv_queue_data_access_error_interrupt (current_cpu, address);
        }
      address &= ~align_mask;
    }
  /* No other access restrictions to check on fr400.  */
  return address;
}

static SI
fr500_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
      || ((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
      || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);

  return address;
}

static SI
fr550_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  /* No alignment restrictions on fr550.  */

  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe408000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe400000 && (USI)address <= 0xfe407fff)
        frv_queue_data_access_exception_interrupt (current_cpu);
    }

  return address;
}

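/* Dispatch the read/write address check to the handler for the machine
   being simulated.  */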
static SI
check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      address = fr400_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}

static PCADDR
fr400_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_instruction_access_error_interrupt (current_cpu);
      address &= ~align_mask;
    }
  else if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);

  return address;
}

static PCADDR
fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if (((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
           || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
           || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}

static PCADDR
fr550_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  address &= ~align_mask;

  if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if ((USI)address >= 0xfe008000 && (USI)address <= 0xfe7fffff)
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe007fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}

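/* Dispatch the instruction read address check to the handler for the
   machine being simulated.  */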
static PCADDR
check_insn_read_address (SIM_CPU *current_cpu, PCADDR address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      address = fr400_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}

/* Memory reads.  */
QI
frvbf_read_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, QI, 1);
    }

  return GETMEMQI (current_cpu, pc, address);
}

UQI
frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
    }

  return GETMEMUQI (current_cpu, pc, address);
}

/* Read a HI which spans two cache lines.  A halfword that crosses a line
   boundary always splits one byte / one byte, so two single-byte reads
   suffice.  The bytes are assembled in target order, then converted to
   host order.  */
static HI
read_mem_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  HI value = frvbf_read_mem_QI (current_cpu, pc, address);
  value <<= 8;
  value |= frvbf_read_mem_UQI (current_cpu, pc, address + 1);
  return T2H_2 (value);
}

HI
frvbf_read_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, HI, 2);
    }

  return GETMEMHI (current_cpu, pc, address);
}

UHI
frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
    }

  return GETMEMUHI (current_cpu, pc, address);
}

/* Read a SI which spans two cache lines.  */
static SI
read_mem_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  char valarray[4];
  SI SIvalue;
  HI HIvalue;

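  /* hi_len is the number of bytes of this access that fall in the first
     cache line; line_size is a power of two, so the AND extracts the
     offset within the line.  For example (with a hypothetical 32-byte
     line), an address whose line offset is 30 gives hi_len = 32 - 30 = 2:
     two bytes come from the first line and two from the next.  */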
  switch (hi_len)
    {
    case 1:
      valarray[0] = frvbf_read_mem_QI (current_cpu, pc, address);
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address + 1);
      SIvalue = H2T_4 (SIvalue);
      memcpy (valarray + 1, (char*)&SIvalue, 3);
      break;
    case 2:
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray, (char*)&HIvalue, 2);
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address + 2);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray + 2, (char*)&HIvalue, 2);
      break;
    case 3:
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      SIvalue = H2T_4 (SIvalue);
      /* The SI read starts one byte early; skip that byte.  */
      memcpy (valarray, (char*)&SIvalue + 1, 3);
      valarray[3] = frvbf_read_mem_QI (current_cpu, pc, address + 3);
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_4 (*(SI*)valarray);
}

SI
frvbf_read_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache;
  USI hsr0;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 4;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
            return read_mem_unaligned_SI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, SI, 4);
    }

  return GETMEMSI (current_cpu, pc, address);
}

SI
frvbf_read_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  return frvbf_read_mem_SI (current_cpu, pc, address);
}

/* Read a DI which spans two cache lines.  */
static DI
read_mem_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  DI value, value1;

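  /* Assemble the doubleword in target byte order: each case shifts the
     bytes that came from the first cache line into the high bits, then
     merges in the bytes from the second line.  Where a helper read starts
     before the requested address or extends past its end, the surplus
     bytes are shifted or masked away.  */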
  switch (hi_len)
    {
    case 1:
      value = frvbf_read_mem_QI (current_cpu, pc, address);
      value <<= 56;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 1);
      value1 = H2T_8 (value1);
      /* value1 covers address + 1 .. address + 8; drop the last byte.  */
      value |= (value1 >> 8) & ((DI)0x00ffffff << 32);
      value |= (value1 >> 8) & 0xffffffffu;
      break;
    case 2:
      value = frvbf_read_mem_HI (current_cpu, pc, address);
      value = H2T_2 (value);
      value <<= 48;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 2);
      value1 = H2T_8 (value1);
      /* value1 covers address + 2 .. address + 9; drop the last two
         bytes.  */
      value |= (value1 >> 16) & ((DI)0x0000ffff << 32);
      value |= (value1 >> 16) & 0xffffffffu;
      break;
    case 3:
      value = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      value = H2T_4 (value);
      /* The SI read started one byte early; the shift drops that byte.  */
      value <<= 40;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 3);
      value1 = H2T_8 (value1);
      /* value1 covers address + 3 .. address + 10; drop the last three
         bytes.  */
      value |= (value1 >> 24) & ((DI)0x000000ff << 32);
      value |= (value1 >> 24) & 0xffffffffu;
      break;
    case 4:
      value = frvbf_read_mem_SI (current_cpu, pc, address);
      value = H2T_4 (value);
      value <<= 32;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 4);
      value1 = H2T_4 (value1);
      value |= value1 & 0xffffffffu;
      break;
    case 5:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 3);
      value = H2T_8 (value);
      value <<= 24;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 5);
      value1 = H2T_4 (value1);
      /* value1 covers address + 5 .. address + 8; drop the last byte.  */
      value |= (value1 >> 8) & 0x00ffffff;
      break;
    case 6:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 2);
      value = H2T_8 (value);
      value <<= 16;
      value1 = frvbf_read_mem_HI (current_cpu, pc, address + 6);
      value1 = H2T_2 (value1);
      value |= value1 & 0x0000ffff;
      break;
    case 7:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 1);
      value = H2T_8 (value);
      value <<= 8;
      value1 = frvbf_read_mem_QI (current_cpu, pc, address + 7);
      value |= value1 & 0x000000ff;
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_8 (value);
}

DI
frvbf_read_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            return read_mem_unaligned_DI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, DI, 8);
    }

  return GETMEMDI (current_cpu, pc, address);
}

DF
frvbf_read_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            return read_mem_unaligned_DI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, DF, 8);
    }

  return GETMEMDF (current_cpu, pc, address);
}

USI
frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
{
  USI hsr0;
  vpc = check_insn_read_address (current_cpu, vpc, 3);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      FRV_CACHE *cache;
      USI value;

      /* We don't want this to show up in the cache statistics.  That read
         is done in frvbf_simulate_insn_prefetch.  So read the cache or
         memory passively here.  */
      cache = CPU_INSN_CACHE (current_cpu);
      if (frv_cache_read_passive_SI (cache, vpc, &value))
        return value;
    }
  return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
}

static SI
fr400_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (align_mask == 7
      && (USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

static SI
fr500_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      struct frv_interrupt_queue_element *item =
        frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      /* Record the correct vliw slot with the interrupt.  */
      if (item != NULL)
        item->slot = frv_interrupt_state.slot;
      address &= ~align_mask;
    }
  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

static SI
fr550_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
      || (align_mask > 0x3
          && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

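/* Dispatch the write address check to the handler for the machine being
   simulated.  */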
static SI
check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      address = fr400_check_write_address (current_cpu, address, align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_write_address (current_cpu, address, align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_write_address (current_cpu, address, align_mask);
      break;
    default:
      break;
    }
  return address;
}

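/* Queued memory writes.  These functions do not write memory directly;
   they enter the write in the CPU's write queue so that the stores of a
   VLIW instruction can be committed together.  When the data cache is
   enabled, the queued element calls back into the corresponding
   frvbf_mem_set_* function below; otherwise the queued write goes
   straight to memory.  */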
void
frvbf_write_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_qi_write (current_cpu, frvbf_mem_set_QI, address, value);
  else
    sim_queue_mem_qi_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address, UQI value)
{
  frvbf_write_mem_QI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_hi_write (current_cpu, frvbf_mem_set_HI, address, value);
  else
    sim_queue_mem_hi_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address, UHI value)
{
  frvbf_write_mem_HI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_si_write (current_cpu, frvbf_mem_set_SI, address, value);
  else
    sim_queue_mem_si_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  frvbf_write_mem_SI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_di_write (current_cpu, frvbf_mem_set_DI, address, value);
  else
    sim_queue_mem_di_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
  else
    sim_queue_mem_df_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

/* Memory writes.  These do the actual writing through the cache.  */
void
frvbf_mem_set_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char *)&value,
                               sizeof (value));
    }
  else
    frv_cache_write (cache, address, (char *)&value, sizeof (value));
}

/* Write a HI which spans two cache lines.  */
static void
mem_set_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, 1);
  frv_cache_write (cache, address + 1, ((char *)&value + 1), 1);
}

void
frvbf_mem_set_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_2 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            {
              mem_set_unaligned_HI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

/* Write a SI which spans two cache lines.  */
static void
mem_set_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
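  /* Split the store at the cache line boundary: the first hi_len bytes go
     to the end of the first line, the remaining bytes to the start of the
     next line.  */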
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 4 - hi_len);
}

void
frvbf_mem_set_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  value = H2T_4 (value);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
            {
              mem_set_unaligned_SI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

/* Write a DI which spans two cache lines.  */
static void
mem_set_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 8 - hi_len);
}

void
frvbf_mem_set_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            {
              mem_set_unaligned_DI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

void
frvbf_mem_set_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            {
              mem_set_unaligned_DI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

void
frvbf_mem_set_XI (SIM_CPU *current_cpu, IADDR pc, SI address, SI *value)
{
  int i;
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0xf);
  address = check_readwrite_address (current_cpu, address, 0xf);

  /* TODO -- reverse word order as well? */
  for (i = 0; i < 4; ++i)
    value[i] = H2T_4 (value[i]);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char*)value, 16);
    }
  else
    frv_cache_write (cache, address, (char*)value, 16);
}

/* Record the current VLIW slot on the element at the top of the write
   queue.  */
void
frv_set_write_queue_slot (SIM_CPU *current_cpu)
{
  FRV_VLIW *vliw = CPU_VLIW (current_cpu);
  int slot = vliw->next_slot - 1;
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q) - 1;
  CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix);
  CGEN_WRITE_QUEUE_ELEMENT_PIPE (item) = (*vliw->current_vliw)[slot];
}