/* This file is part of the program psim.

   Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

   */


#ifndef _VM_C_
#define _VM_C_

#ifndef STATIC_INLINE_VM
#define STATIC_INLINE_VM STATIC_INLINE
#endif


#include "basics.h"

#include "registers.h"

#include "device_tree.h"
#include "corefile.h"

#include "vm.h"

#include "interrupts.h"

#include "mon.h"

/* OEA vs VEA

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces, so that writes to instruction space and
   execution of data space are prevented.

   For the OEA model things are more complex.  The reason for separate
   instruction and data models becomes crucial.  The OEA model is
   built out of three parts: an instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory. */


/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between the OEA registers and this model's internal data
   structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for the BAT registers are rebuilt
   whenever the processor moves between problem and system states. */


/* Protection table:

   Matrix of processor state, type of access and validity */

typedef enum {
  om_supervisor_state,
  om_problem_state,
  nr_om_modes
} om_processor_modes;

typedef enum {
  om_data_read, om_data_write,
  om_instruction_read, om_access_any,
  nr_om_access_types
} om_access_types;

static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  { /*r  w  i  a       pp */
    { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1 or P bit valid */
  { /*r  w  i  a       pp */
    { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  }
};
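
/* Illustration only - a minimal sketch of how the matrix above is
   consulted.  The real lookups appear inline in
   om_translate_effective_to_real() further down; this helper and its
   name are not part of the model: */
#if 0
static int
om_sketch_access_ok(int key, int pp, om_access_types access)
{
  /* key: 0/1 from the K (or P) protection state; pp: the two PP bits */
  return om_valid_access[key][pp][access];
}
#endif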


/* BAT translation:

   The bat data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;
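
/* Illustration only - how a single BAT entry both matches and
   translates an effective address; this mirrors om_effective_to_bat()
   below together with the BAT case in om_translate_effective_to_real().
   The helper is a sketch, not part of the model: */
#if 0
static int
om_sketch_bat_translate(om_bat *bat, unsigned_word ea, unsigned_word *ra)
{
  if ((ea & bat->block_effective_page_index_mask)
      != bat->block_effective_page_index)
    return 0; /* no match - fall back to segment/page translation */
  *ra = (ea & bat->block_length_mask) | bat->block_real_page_number;
  return 1;
}
#endif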


/* Segment TLB:

   In this model the 32 and 64 bit segment tables are treated in very
   similar ways.  The 32-bit segment registers are treated as a
   simplification of the 64-bit segment TLB. */

enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};

typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id;
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;


/* Page TLB:

   This OEA model includes a small direct-mapped page TLB.  The TLB
   cuts down on the need for the OEA model to perform walks of the
   page hash table. */

enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};

typedef struct _om_page_tlb_entry {
  int valid;
  int protection;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;


/* Memory translation:

   OEA memory translation possibly involves BAT, SR, TLB and HTAB
   information. */

typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

} om_map;


/* VM objects:

   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};


/* VM:

   Underlying memory object.  For the VEA this is just the core_map.
   For the OEA it is the instruction and data memory translations. */

struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  /* real memory */
  core *physical;

  /* memory maps */
  vm_instruction_map instruction_map;
  vm_data_map data_map;

};


/* OEA Support procedures */


STATIC_INLINE_VM unsigned_word
om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_segment_tlb_index_start_bit,
                                  om_segment_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM unsigned_word
om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_page_tlb_index_start_bit,
                                  om_page_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM unsigned_word
om_masked_page(unsigned_word ea)
{
  unsigned_word masked_page = MASKED(ea, 36, 51);
  return masked_page;
}

STATIC_INLINE_VM unsigned_word
om_masked_byte(unsigned_word ea)
{
  unsigned_word masked_byte = MASKED(ea, 52, 63);
  return masked_byte;
}
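
/* Illustration only - the MASKED()/EXTRACTED() macros used above follow
   IBM bit numbering (bit 0 is the most significant bit of the target
   word), so for a 64-bit word MASKED(ea, 52, 63) keeps the low 12 bits
   in place, i.e. the byte offset within a 4K page: */
#if 0
ASSERT(om_masked_byte(0x12345678) == 0x678);
#endif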



INLINE_VM vm *
vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
                - om_segment_tlb_index_start_bit + 1)))
    error("vm_create() - internal error with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
                - om_page_tlb_index_start_bit + 1)))
    error("vm_create() - internal error with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  /* set up core */
  virtual->physical = physical;

  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}
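
/* Usage sketch (illustration only; `physical' is a core object obtained
   elsewhere, and the maps are then fetched with the accessors defined
   towards the end of this file): */
#if 0
vm *virtual = vm_create(physical);
vm_instruction_map *imap = vm_create_instruction_map(virtual);
vm_data_map *dmap = vm_create_data_map(virtual);
#endif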


STATIC_INLINE_VM om_bat *
om_effective_to_bat(om_map *map,
                    unsigned_word ea)
{
  int curr_bat = 0;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
        != bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}


STATIC_INLINE_VM om_segment_tlb_entry *
om_effective_to_virtual(om_map *map,
                        unsigned_word ea,
                        cpu *processor,
                        unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
                                             + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  {
    unsigned_word segment_id_hash = ea;
    int current_hash = 0;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
        (map->real_address_of_segment_table
         | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
           segment_table_entry < (segment_table_entry_group
                                  + sizeof_segment_table_entry_group);
           segment_table_entry += sizeof_segment_table_entry) {
        /* byte order? */
        unsigned_word segment_table_entry_dword_0 =
          core_map_read_8(map->physical, segment_table_entry, processor, cia);
        unsigned_word segment_table_entry_dword_1 =
          core_map_read_8(map->physical, segment_table_entry + 8, processor, cia);
        int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
        unsigned_word masked_effective_segment_id =
          MASKED64(segment_table_entry_dword_0, 0, 35);
        if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
          /* don't permit some things */
          if (MASKED64(segment_table_entry_dword_0, 57, 57))
            error("om_effective_to_virtual() - T=1 in STE not supported\n");
          /* update segment tlb */
          segment_tlb_entry->is_valid = is_valid;
          segment_tlb_entry->masked_effective_segment_id =
            masked_effective_segment_id;
          segment_tlb_entry->key[om_supervisor_state] =
            EXTRACTED64(segment_table_entry_dword_0, 58, 58);
          segment_tlb_entry->key[om_problem_state] =
            EXTRACTED64(segment_table_entry_dword_0, 59, 59);
          segment_tlb_entry->invalid_access =
            (MASKED64(segment_table_entry_dword_0, 60, 60)
             ? om_instruction_read
             : om_access_any);
          segment_tlb_entry->masked_virtual_segment_id =
            MASKED(segment_table_entry_dword_1, 0, 51);
          return segment_tlb_entry;
        }
      }
      segment_id_hash = ~segment_id_hash;
    }
  }
  return NULL;
#endif
}



STATIC_INLINE_VM om_page_tlb_entry *
om_virtual_to_real(om_map *map,
                   unsigned_word ea,
                   om_segment_tlb_entry *segment_tlb_entry,
                   om_access_types access,
                   cpu *processor,
                   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
                                       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if (page_tlb_entry->valid
      && (page_tlb_entry->masked_virtual_segment_id ==
          segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page == om_masked_page(ea))) {
    error("fixme - it is not a hit if direction/update bits do not match\n");
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  {
    unsigned_word page_hash = (segment_tlb_entry->masked_virtual_segment_id
                               ^ om_masked_page(ea));
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
        (map->real_address_of_page_table
         | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte;
      for (real_address_of_pte = real_address_of_pte_group;
           real_address_of_pte < (real_address_of_pte_group
                                  + sizeof_pte_group);
           real_address_of_pte += sizeof_pte) {
        unsigned_word pte_word_0 =
          core_map_read_word(map->physical,
                             real_address_of_pte,
                             processor, cia);
        unsigned_word pte_word_1 =
          core_map_read_word(map->physical,
                             real_address_of_pte + sizeof_pte / 2,
                             processor, cia);
        error("fixme - check pte hit\n");
        if (1) {
          error("fixme - update the page_tlb\n");
          page_tlb_entry->valid = 1;
          page_tlb_entry->protection = 0;
          page_tlb_entry->masked_virtual_segment_id = 0;
          page_tlb_entry->masked_page = 0;
          page_tlb_entry->masked_real_page_number = 0;
          return page_tlb_entry;
        }
      }
      page_hash = ~page_hash; /*???*/
    }
  }
  return NULL;
}
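
/* A hedged sketch of the missing hit test above: in the 32-bit PowerPC
   HTAB format, word 0 of a PTE holds V (bit 0), VSID (bits 1-24), H
   (bit 25) and API (bits 26-31), so a hit requires V set, H matching
   `current_hash', and the VSID/API fields matching the looked-up
   address.  Illustration only - how these fields line up with the
   model's masked_* values is an assumption here: */
#if 0
int pte_hit = (EXTRACTED32(pte_word_0, 0, 0)                        /* V */
               && (EXTRACTED32(pte_word_0, 25, 25) == current_hash) /* H */
               && (EXTRACTED32(pte_word_0, 1, 24)                /* VSID */
                   == segment_tlb_entry->masked_virtual_segment_id)
               && (EXTRACTED32(pte_word_0, 26, 31)                /* API */
                   == EXTRACTED32(ea, 4, 9)));
#endif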


static void
om_interrupt(cpu *processor,
             unsigned_word cia,
             unsigned_word ea,
             om_access_types access,
             storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("om_interrupt - unexpected access type %d, cia=0x%x, ea=0x%x\n",
          access, cia, ea);
  }
}


STATIC_INLINE_VM unsigned_word
om_translate_effective_to_real(om_map *map,
                               unsigned_word ea,
                               om_access_types access,
                               cpu *processor,
                               unsigned_word cia,
                               int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("%s, direct map, ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("%s, bat protection violation, ea=0x%x\n",
                       "om_translate_effective_to_real",
                       ea));
      if (abort)
        om_interrupt(processor, cia, ea, access,
                     protection_violation_storage_interrupt);
      else
        return MASK(0, 63);
    }

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("%s, bat translation, ea=0x%x, ra=0x%x\n",
                     "om_translate_effective_to_real",
                     ea, ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("%s, segment tlb lookup failed - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif
  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("%s, segment tlb access invalid - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* lookup in PTE */
  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
                                      access,
                                      processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("%s, page tlb lookup failed - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
  if (!(om_valid_access
        [segment_tlb_entry->key[map->is_problem_state]]
        [page_tlb_entry->protection]
        [access])) {
    TRACE(trace_vm, ("%s, page tlb access invalid - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  ra = (page_tlb_entry->masked_real_page_number
        | om_masked_byte(ea));
  TRACE(trace_vm, ("%s, page - ea=0x%x, ra=0x%x\n",
                   "om_translate_effective_to_real",
                   ea, ra));
  return ra;
}


/*
 * Definition of operations for memory management
 */


/* rebuild all the relevant bat information */
STATIC_INLINE_VM void
om_unpack_bat(om_bat *bat,
              spreg ubat,
              spreg lbat)
{
  /* for extracting out the offset within a page */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}


/* rebuild the given bat table */
STATIC_INLINE_VM void
om_unpack_bats(om_bats *bats,
               spreg *raw_bats,
               msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 62, 62)
        : EXTRACTED(ubat, 63, 63)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
                    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM void
om_unpack_sr(vm *virtual,
             sreg *srs,
             int which_sr)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  if (which_sr < 0 || which_sr >= nr_om_segment_tlb_entries)
    error("om_unpack_sr: segment register out of bounds\n");

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    error("om_unpack_sr(): unsupported value of T in segment register %d\n",
          which_sr);

  /* update info */
  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
                                       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id = MASKED32(new_sr_value, 8, 31);
}
#endif


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM void
om_unpack_srs(vm *virtual,
              sreg *srs)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr);
  }
}
#endif


/* Rebuild all the data structures for the new context as specified by
   the passed registers */
INLINE_VM void
vm_synchronize_context(vm *virtual,
                       spreg *sprs,
                       sreg *srs,
                       msreg msr)
{

  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;


  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;


  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);


  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = MASKED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(47-EXTRACTED64(sprs[spr_sdr1], 59, 63),
                                57);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = MASKED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = ((EXTRACTED32(sprs[spr_sdr1], 23, 31) << (10+6))
                          | MASK32(16, 25));
#endif
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;


#if (WITH_TARGET_WORD_BITSIZE == 32)
  /* unpack the segment tlb registers */
  om_unpack_srs(virtual, srs);
#endif
}
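
/* Usage sketch (illustration only; the register-file field names shown
   here are an assumption about the caller, not something defined in
   this file): resynchronize after an instruction changes the MSR, a
   segment register, or a BAT/SDR1 SPR: */
#if 0
vm_synchronize_context(virtual, regs->spr, regs->sr, regs->msr);
#endif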


INLINE_VM vm_data_map *
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}


INLINE_VM vm_instruction_map *
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}


STATIC_INLINE_VM unsigned_word
vm_translate(om_map *map,
             unsigned_word ea,
             om_access_types access,
             cpu *processor,
             unsigned_word cia,
             int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
                                          processor, cia,
                                          abort);
  default:
    error("vm_translate() - unknown environment\n");
    return 0;
  }
}


INLINE_VM unsigned_word
vm_real_data_addr(vm_data_map *map,
                  unsigned_word ea,
                  int is_read,
                  cpu *processor,
                  unsigned_word cia)
{
  return vm_translate(&map->translation,
                      ea,
                      is_read ? om_data_read : om_data_write,
                      processor,
                      cia,
                      1); /*abort*/
}


INLINE_VM unsigned_word
vm_real_instruction_addr(vm_instruction_map *map,
                         cpu *processor,
                         unsigned_word cia)
{
  return vm_translate(&map->translation,
                      cia,
                      om_instruction_read,
                      processor,
                      cia,
                      1); /*abort*/
}

INLINE_VM instruction_word
vm_instruction_map_read(vm_instruction_map *map,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  return core_map_read_4(map->code, ra, processor, cia);
}


INLINE_VM int
vm_data_map_read_buffer(vm_data_map *map,
                        void *target,
                        unsigned_word addr,
                        unsigned nr_bytes)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_read,
                                    NULL, /*processor*/
                                    0, /*cia*/
                                    0); /*dont-abort*/
    if (ra == MASK(0, 63))
      break;
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
        != sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}


INLINE_VM int
vm_data_map_write_buffer(vm_data_map *map,
                         const void *source,
                         unsigned_word addr,
                         unsigned nr_bytes,
                         int violate_read_only_section)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_write,
                                    NULL/*processor*/,
                                    0, /*cia*/
                                    0); /*dont-abort*/
    if (ra == MASK(0, 63))
      break;
    byte = H2T_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
                               ? map->read
                               : map->write),
                              &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}


/* define the read/write 1/2/4/8/word functions */

#undef N
#define N 1
#include "vm_n.h"

#undef N
#define N 2
#include "vm_n.h"

#undef N
#define N 4
#include "vm_n.h"

#undef N
#define N 8
#include "vm_n.h"

#undef N
#define N word
#include "vm_n.h"
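
/* Illustration only - the shape of what each inclusion of vm_n.h
   expands to, with N standing for the size suffix (1/2/4/8/word); see
   vm_n.h for the actual definitions, the prototypes here are a sketch
   inferred from the naming convention: */
#if 0
INLINE_VM unsigned_N
vm_data_map_read_N(vm_data_map *map, unsigned_word ea,
                   cpu *processor, unsigned_word cia);
INLINE_VM void
vm_data_map_write_N(vm_data_map *map, unsigned_word ea, unsigned_N val,
                    cpu *processor, unsigned_word cia);
#endif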



#endif /* _VM_C_ */