/* This file is part of the program psim.

   Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
   */
#ifndef STATIC_INLINE_VM
#define STATIC_INLINE_VM STATIC_INLINE
#endif
#include "basics.h"
#include "registers.h"
#include "device_tree.h"
#include "corefile.h"
#include "vm.h"
#include "interrupts.h"
/* OEA vs VEA:

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces, so that writes to instruction space and
   execution of data space can be prevented.

   For the OEA model things are more complex.  The distinction between
   instruction and data models becomes crucial.  The OEA model is
   built out of three parts: an instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory. */
/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between the OEA registers and this model's internal
   data structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for the BAT registers are rebuilt
   whenever the processor moves between problem and system states. */
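
/* A minimal sketch of such a synchronization point, assuming a
   hypothetical caller that keeps the special/segment registers and
   the vm pointer on its cpu structure (none of these helpers are
   defined in this file):

     static void
     example_mtmsr(cpu *processor, vm *virtual, msreg new_msr)
     {
       cpu_set_msr(processor, new_msr);
       vm_synchronize_context(virtual,
                              processor_sprs(processor),
                              processor_srs(processor),
                              new_msr);
     }

   After the call, the shadow BAT/segment/SDR1 structures once again
   match the architected registers. */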
/* Protection table:

   Matrix of processor state, type of access and validity */

typedef enum {
  om_supervisor_state,
  om_problem_state,
  nr_om_modes
} om_processor_modes;

typedef enum {
  om_data_read, om_data_write,
  om_instruction_read, om_access_any,
  nr_om_access_types
} om_access_types;

static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  { { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1 or P bit valid */
  { { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  }
};
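
/* Reading the table: the first index is the access key (0 or 1), the
   second the page protection bits PP, the third the access type.  So
   with key == 1 and PP == 0b01, om_valid_access[1][1][om_data_read]
   is 1 (load allowed) while om_valid_access[1][1][om_data_write] is 0
   (store denied) - the classic read-only page. */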
/* Bat translation:

   The bat data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */
typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;
enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};
typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;
/* Segment TLB:

   In this model the 32 and 64 bit segment tables are treated in very
   similar ways.  The 32 bit segment registers are treated as a
   simplification of the 64 bit segment tlb. */
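
/* Concretely: om_segment_tlb_index() below extracts effective-address
   bits 32..35 (the top four bits of a 32 bit address in the 64 bit
   numbering used throughout this file), so on a 32 bit target segment
   register SRn is simply entry[n] of this tlb and a lookup can never
   miss. */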
enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};
typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id;
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;
typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;
/* Page TLB:

   This OEA model includes a small direct-mapped page TLB.  The TLB
   cuts down on the need for the OEA model to perform walks of the
   page hash table. */
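
/* The TLB is indexed directly by effective-address bits 46..51, the
   low six bits of the page number.  With 4K pages, ea 0x1000 and
   0x2000 select entries 1 and 2, while 0x1000 and 0x41000 collide on
   entry 1 and evict one another. */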
enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};
typedef struct _om_page_tlb_entry {
  int valid;
  int protection;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;
typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;
/* memory translation:

   OEA memory translation possibly involves BAT, SR, TLB and HTAB
   information. */

typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

} om_map;
/* External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};
/* Underlying memory object.  For the VEA this is just the core_map.
   For OEA it is the instruction and data memory spaces. */

struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  /* real memory */
  core *physical;

  /* memory maps */
  vm_instruction_map instruction_map;
  vm_data_map data_map;

};
/* OEA Support procedures */
STATIC_INLINE_VM unsigned_word
om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_segment_tlb_index_start_bit,
                                  om_segment_tlb_index_stop_bit);
  return index;
}
STATIC_INLINE_VM unsigned_word
om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_page_tlb_index_start_bit,
                                  om_page_tlb_index_stop_bit);
  return index;
}
STATIC_INLINE_VM unsigned_word
om_masked_page(unsigned_word ea)
{
  unsigned_word masked_page = MASKED(ea, 36, 51);
  return masked_page;
}
STATIC_INLINE_VM unsigned_word
om_masked_byte(unsigned_word ea)
{
  unsigned_word masked_byte = MASKED(ea, 52, 63);
  return masked_byte;
}
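
/* Together these helpers split an effective address (64 bit bit
   numbering) into its architected fields:

     bits  0..35  effective segment id (looked up via SRs/segment table)
     bits 36..51  page number within the segment (om_masked_page)
     bits 52..63  byte offset within the 4K page (om_masked_byte)

   For example, given ea = 0x12345678, om_masked_page() keeps
   0x12345000 and om_masked_byte() keeps 0x678. */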
INLINE_VM vm *
vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
                - om_segment_tlb_index_start_bit + 1)))
    error("new_vm() - internal error with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
                - om_page_tlb_index_start_bit + 1)))
    error("new_vm() - internal error with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  /* set up core */
  virtual->physical = physical;
  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}
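
/* A sketch of how a simulator front end might bring this layer up
   (`create_physical_core' is a hypothetical stand-in for whatever
   constructs the core object):

     core *physical = create_physical_core();
     vm *virtual = vm_create(physical);
     vm_synchronize_context(virtual, sprs, srs, msr);

   Until vm_synchronize_context() is called, both maps run untranslated
   (is_relocate was cleared above). */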
STATIC_INLINE_VM om_bat *
om_effective_to_bat(om_map *map,
                    unsigned_word ea)
{
  int curr_bat;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
        != bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}
STATIC_INLINE_VM om_segment_tlb_entry *
om_effective_to_virtual(om_map *map,
                        unsigned_word ea,
                        cpu *processor,
                        unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
                                             + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id
          == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  {
    unsigned_word segment_id_hash = ea;
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
        (map->real_address_of_segment_table
         | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
           segment_table_entry < (segment_table_entry_group
                                  + sizeof_segment_table_entry_group);
           segment_table_entry += sizeof_segment_table_entry) {
        unsigned_word segment_table_entry_dword_0 =
          core_map_read_8(map->physical, segment_table_entry,
                          processor, cia);
        unsigned_word segment_table_entry_dword_1 =
          core_map_read_8(map->physical, segment_table_entry + 8,
                          processor, cia);
        int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
        unsigned_word masked_effective_segment_id =
          MASKED64(segment_table_entry_dword_0, 0, 35);
        if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
          /* don't permit some things */
          if (MASKED64(segment_table_entry_dword_0, 57, 57))
            error("om_effective_to_virtual() - T=1 in STE not supported\n");
          /* update segment tlb */
          segment_tlb_entry->is_valid = is_valid;
          segment_tlb_entry->masked_effective_segment_id =
            masked_effective_segment_id;
          segment_tlb_entry->key[om_supervisor_state] =
            EXTRACTED64(segment_table_entry_dword_0, 58, 58);
          segment_tlb_entry->key[om_problem_state] =
            EXTRACTED64(segment_table_entry_dword_0, 59, 59);
          segment_tlb_entry->invalid_access =
            (MASKED64(segment_table_entry_dword_0, 60, 60)
             ? om_instruction_read
             : om_access_any);
          segment_tlb_entry->masked_virtual_segment_id =
            MASKED(segment_table_entry_dword_1, 0, 51);
          return segment_tlb_entry;
        }
      }
      segment_id_hash = ~segment_id_hash;
    }
  }
  return NULL;
#endif
}
STATIC_INLINE_VM om_page_tlb_entry *
om_virtual_to_real(om_map *map,
                   unsigned_word ea,
                   om_segment_tlb_entry *segment_tlb_entry,
                   om_access_types access,
                   cpu *processor,
                   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
                                       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if (page_tlb_entry->valid
      && (page_tlb_entry->masked_virtual_segment_id
          == segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page == om_masked_page(ea))) {
    error("fixme - it is not a hit if direction/update bits do not match\n");
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  {
    unsigned_word page_hash = (segment_tlb_entry->masked_virtual_segment_id
                               ^ om_masked_page(ea));
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
        (map->real_address_of_page_table
         | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte;
      for (real_address_of_pte = real_address_of_pte_group;
           real_address_of_pte < (real_address_of_pte_group
                                  + sizeof_pte_group);
           real_address_of_pte += sizeof_pte) {
        unsigned_word pte_word_0 =
          core_map_read_word(map->physical,
                             real_address_of_pte,
                             processor, cia);
        unsigned_word pte_word_1 =
          core_map_read_word(map->physical,
                             real_address_of_pte + sizeof_pte / 2,
                             processor, cia);
        error("fixme - check pte hit %ld %ld\n",
              (long)pte_word_0, (long)pte_word_1);
        error("fixme - update the page_tlb\n");
        page_tlb_entry->valid = 1;
        page_tlb_entry->protection = 0;
        page_tlb_entry->masked_virtual_segment_id = 0;
        page_tlb_entry->masked_page = 0;
        page_tlb_entry->masked_real_page_number = 0;
        return page_tlb_entry;
      }
      page_hash = ~page_hash; /*???*/
    }
  }
  return NULL;
}
STATIC_INLINE_VM void
om_interrupt(cpu *processor,
             unsigned_word cia,
             unsigned_word ea,
             om_access_types access,
             storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("om_interrupt - unexpected access type %d, cia=0x%x, ea=0x%x\n",
          access, cia, ea);
  }
}
STATIC_INLINE_VM unsigned_word
om_translate_effective_to_real(om_map *map,
                               unsigned_word ea,
                               om_access_types access,
                               cpu *processor,
                               unsigned_word cia,
                               int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("%s, direct map, ea=0x%x\n",
                     "om_translate_effective_to_real", ea));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("%s, bat protection violation, ea=0x%x\n",
                       "om_translate_effective_to_real", ea));
      if (abort)
        om_interrupt(processor, cia, ea, access,
                     protection_violation_storage_interrupt);
      else
        return MASK(0, 63);
    }

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("%s, bat translation, ea=0x%x, ra=0x%x\n",
                     "om_translate_effective_to_real", ea, ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("%s, segment tlb lookup failed - ea=0x%x\n",
                     "om_translate_effective_to_real", ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif

  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("%s, segment tlb access invalid - ea=0x%x\n",
                     "om_translate_effective_to_real", ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* translate va to ra using the page tlb/hash table */
  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
                                      access, processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("%s, page tlb lookup failed - ea=0x%x\n",
                     "om_translate_effective_to_real", ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
  if (!(om_valid_access
        [segment_tlb_entry->key[map->is_problem_state]]
        [page_tlb_entry->protection]
        [access])) {
    TRACE(trace_vm, ("%s, page tlb access invalid - ea=0x%x\n",
                     "om_translate_effective_to_real", ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  ra = (page_tlb_entry->masked_real_page_number
        | om_masked_byte(ea));
  TRACE(trace_vm, ("%s, page - ea=0x%x, ra=0x%x\n",
                   "om_translate_effective_to_real", ea, ra));
  return ra;
}
/*
 * Definition of operations for memory management
 */
/* rebuild all the relevant bat information */
STATIC_INLINE_VM void
om_unpack_bat(om_bat *bat,
              spreg ubat,
              spreg lbat)
{
  /* for extracting out the offset within a page */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}
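
/* A worked example (values chosen for illustration): a BL field of
   0b00000000001 describes a 256KB block, so block_length_mask gets
   bits 46..63 set (the offset within the block) and
   block_effective_page_index_mask its complement, bits 0..45; an
   effective address hits this BAT when its bits 0..45 equal BEPI. */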
/* rebuild the given bat table */
STATIC_INLINE_VM void
om_unpack_bats(om_bats *bats,
               spreg *raw_bats,
               msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 62, 62)
        : EXTRACTED(ubat, 63, 63)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
                    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}
#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM void
om_unpack_sr(vm *virtual,
             sreg *srs,
             int which_sr)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  if (which_sr < 0 || which_sr >= nr_om_segment_tlb_entries)
    error("om_set_sr: segment register out of bounds\n");

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    error("om_set_sr(): unsupported value of T in segment register %d\n",
          which_sr);

  /* update info */
  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
                                       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id = MASKED32(new_sr_value, 8, 31);
}
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM void
om_unpack_srs(vm *virtual,
              sreg *srs)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr);
  }
}
#endif
/* Rebuild all the data structures for the new context as specified by
   the passed registers */
INLINE_VM void
vm_synchronize_context(vm *virtual,
                       spreg *sprs,
                       sreg *srs,
                       msreg msr)
{
  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = EXTRACTED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(47-EXTRACTED64(sprs[spr_sdr1], 59, 63),
                                63-7);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = EXTRACTED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = ((EXTRACTED32(sprs[spr_sdr1], 23, 31) << (10+6))
                          | MASK32(16, 25));
#endif
  virtual->instruction_map.translation.real_address_of_page_table =
    real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask =
    page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table =
    real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask =
    page_table_hash_mask;

#if (WITH_TARGET_WORD_BITSIZE == 32)
  /* unpack the segment tlb registers */
  om_unpack_srs(virtual, srs);
#endif
}
INLINE_VM vm_data_map *
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}
INLINE_VM vm_instruction_map *
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}
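
/* Despite the `create' in their names, both functions just hand out
   pointers to maps embedded in the vm.  A cpu model would typically
   cache them once at initialization (the cpu fields here are
   hypothetical):

     processor->instruction_map = vm_create_instruction_map(virtual);
     processor->data_map = vm_create_data_map(virtual);
*/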
STATIC_INLINE_VM unsigned_word
vm_translate(om_map *map,
             unsigned_word ea,
             om_access_types access,
             cpu *processor,
             unsigned_word cia,
             int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
                                          processor, cia,
                                          abort);
  default:
    error("vm_translate() - unknown environment\n");
    return 0;
  }
}
INLINE_VM unsigned_word
vm_real_data_addr(vm_data_map *map,
                  unsigned_word ea,
                  int is_read,
                  cpu *processor,
                  unsigned_word cia)
{
  return vm_translate(&map->translation,
                      ea,
                      is_read ? om_data_read : om_data_write,
                      processor,
                      cia,
                      1); /*abort*/
}
INLINE_VM unsigned_word
vm_real_instruction_addr(vm_instruction_map *map,
                         cpu *processor,
                         unsigned_word cia)
{
  return vm_translate(&map->translation,
                      cia,
                      om_instruction_read,
                      processor,
                      cia,
                      1); /*abort*/
}
INLINE_VM instruction_word
vm_instruction_map_read(vm_instruction_map *map,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  return core_map_read_4(map->code, ra, processor, cia);
}
INLINE_VM int
vm_data_map_read_buffer(vm_data_map *map,
                        void *target,
                        unsigned_word addr,
                        unsigned nr_bytes)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_read,
                                    NULL, /*processor*/
                                    0, /*cia*/
                                    0); /*dont-abort*/
    if (ra == MASK(0, 63))
      break;
    /* read through the real address, not the effective one */
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
        != sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}
INLINE_VM int
vm_data_map_write_buffer(vm_data_map *map,
                         const void *source,
                         unsigned_word addr,
                         unsigned nr_bytes,
                         int violate_read_only_section)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_write,
                                    NULL, /*processor*/
                                    0, /*cia*/
                                    0); /*dont-abort*/
    if (ra == MASK(0, 63))
      break;
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
                               ? map->read
                               : map->write),
                              &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}
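
/* Usage sketch (`dmap', the buffer and the address are hypothetical):

     unsigned_1 buf[16];
     unsigned nr = vm_data_map_read_buffer(dmap, buf, 0xf000, sizeof buf);

   A result nr < sizeof buf means the range was partly unmapped or
   protected.  Because the buffer routines translate with abort == 0,
   such failures surface as a short transfer count rather than a
   storage interrupt - suited to debugger style accesses. */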
/* define the read/write 1/2/4/8/word functions */

#define N 1
#include "vm_n.h"
#undef N

#define N 2
#include "vm_n.h"
#undef N

#define N 4
#include "vm_n.h"
#undef N

#define N 8
#include "vm_n.h"
#undef N

#define N word
#include "vm_n.h"
#undef N