/* This file is part of the program psim.

   Copyright (C) 1994-1997, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   */
#include "registers.h"
#include "interrupts.h"
/* For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces, so that writes to the instruction
   space and execution from the data space are prevented.

   For the OEA model things are more complex.  The reason for separate
   instruction and data models becomes crucial.  The OEA model is
   built out of three parts: an instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory. */
/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between the OEA registers and this model's internal
   data structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for the BAT registers are rebuilt
   whenever the processor moves between problem and system states.

   Unpacked values are stored in the OEA so that they correctly align
   to where they will be needed by the PTE address. */
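
/* Illustrative sketch only: a `synchronization' point amounts to a
   single call that re-derives all of the unpacked shadow state from
   the raw register values.  The handler name context_switch_example
   is hypothetical; vm_synchronize_context() is the real entry point
   defined towards the end of this file. */
#if 0
static void
context_switch_example(vm *virtual, spreg *sprs, sreg *srs, msreg msr,
                       cpu *processor, unsigned_word cia)
{
  /* after any change to the MSR, the BATs, the SRs or SDR1, rebuild
     the unpacked shadow structures in one go */
  vm_synchronize_context(virtual, sprs, srs, msr, processor, cia);
}
#endif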
   Matrix of processor state, type of access and validity */

typedef enum _om_access_types {
  om_data_read,
  om_data_write,
  om_instruction_read,
  om_access_any,
  nr_om_access_types
} om_access_types;
static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  { { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1 or P bit valid */
  { { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  }
};
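
/* The table above is indexed as om_valid_access[key][pp][access]:
   the segment key bit selected by the current privilege state, the
   PP (page protection) bits from the PTE or BAT, and the access type
   being attempted.  A minimal usage sketch; check_access is a
   hypothetical helper used only for illustration: */
#if 0
static int
check_access(int key, int pp, om_access_types access)
{
  /* non-zero when the combination is permitted */
  return om_valid_access[key][pp][access];
}
#endif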
   The bat data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;
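
/* How the unpacked BAT fields are used, reduced to a sketch.  A BAT
   hits when the effective address matches the block's page index
   under the index mask; the real address is then the block's real
   page number combined with the offset kept by the length mask.
   bat_translate is a hypothetical helper for illustration only; the
   real code paths are om_effective_to_bat() and
   om_translate_effective_to_real() below. */
#if 0
static int
bat_translate(const om_bat *bat, unsigned_word ea, unsigned_word *ra)
{
  if ((ea & bat->block_effective_page_index_mask)
      != bat->block_effective_page_index)
    return 0; /* miss */
  *ra = (ea & bat->block_length_mask) | bat->block_real_page_number;
  return 1; /* hit */
}
#endif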
   In this model the 32- and 64-bit segment tables are treated in very
   similar ways.  The 32-bit segment registers are treated as a
   simplification of the 64-bit segment TLB. */
enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};

typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id; /* aligned ready for pte group addr */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;
/* This OEA model includes a small direct-mapped page TLB.  The TLB
   cuts down on the need for the OEA model to perform walks of the
   page hash table. */
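
/* Being direct mapped, the TLB slot for an effective address is just
   a bit-field of that address (bits 46..51 in the 64-bit numbering
   used throughout this file, giving 64 slots).  Sketch only, assuming
   the constants and types declared just below; lookup_slot is a
   hypothetical name and om_page_tlb_index() is defined further down: */
#if 0
static om_page_tlb_entry *
lookup_slot(om_page_tlb *tlb, unsigned_word ea)
{
  return tlb->entry + om_page_tlb_index(ea);
}
#endif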
enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};

typedef struct _om_page_tlb_entry {
  int protection;
  int changed;
  unsigned_word real_address_of_pte_1;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;
/* memory translation:

   OEA memory translation possibly involves BAT, SR, TLB and HTAB
   lookups; the om_map structure below caches the state needed to
   drive them. */
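
/* The order of the lookups performed by om_translate_effective_to_real()
   further below, reduced to a sketch: translation off gives an identity
   map, then BATs are checked, then the segment and page mechanisms.
   translate_order is a hypothetical name used only to illustrate the
   flow (it skips the protection checks and interrupt delivery that the
   real function performs): */
#if 0
static unsigned_word
translate_order(om_map *map, unsigned_word ea, om_access_types access,
                cpu *processor, unsigned_word cia)
{
  om_bat *bat;
  if (!map->is_relocate)
    return ea;                                   /* translation disabled */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL)                               /* BAT hit */
    return (ea & bat->block_length_mask) | bat->block_real_page_number;
  /* otherwise ea -> va via the segment tlb, va -> ra via page tlb/HTAB */
  return om_translate_effective_to_real(map, ea, access, processor, cia,
                                        1/*abort*/);
}
#endif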
typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

  /* address xor for PPC endian */
  unsigned xor[WITH_XOR_ENDIAN];

} om_map;
   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};


/*
   Underlying memory object.  For the VEA this is just the
   core_map.  For OEA it is the instruction and data memory
   management state. */

struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  core *physical;

  vm_instruction_map instruction_map;
  vm_data_map data_map;

};
/* OEA Support procedures */


om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_segment_tlb_index_start_bit,
                                  om_segment_tlb_index_stop_bit);
  return index;
}


om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_page_tlb_index_start_bit,
                                  om_page_tlb_index_stop_bit);
  return index;
}
om_hash_page(unsigned_word masked_vsid,
             unsigned_word ea)
{
  unsigned_word extracted_ea = EXTRACTED(ea, 36, 51);
#if (WITH_TARGET_WORD_BITSIZE == 32)
  unsigned_word masked_ea = INSERTED32(extracted_ea, 7, 31-6);
  unsigned_word hash = masked_vsid ^ masked_ea;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word masked_ea = INSERTED64(extracted_ea, 18, 63-7);
  unsigned_word hash = masked_vsid ^ masked_ea;
#endif
  TRACE(trace_vm, ("ea=0x%lx - masked-vsid=0x%lx masked-ea=0x%lx hash=0x%lx\n",
                   (unsigned long)ea,
                   (unsigned long)masked_vsid,
                   (unsigned long)masked_ea,
                   (unsigned long)hash));
  return hash;
}
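
/* A worked sketch of the 32-bit primary hash computed above.  The
   masked (pre-shifted) inputs keep the result aligned to a 64-byte
   PTE group, so the computation is equivalent to the architected
   (VSID ^ page-index) hash shifted left by 6.  primary_hash_32 is a
   hypothetical helper, for illustration only: */
#if 0
static unsigned long
primary_hash_32(unsigned long vsid /* 24 bits */,
                unsigned long ea /* 32-bit effective address */)
{
  unsigned long page_index = (ea >> 12) & 0xffff;  /* EA bits 4..19 */
  return ((vsid & 0xffffff) ^ page_index) << 6;    /* == masked_vsid ^ masked_ea */
}
#endif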
om_pte_0_api(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 26, 31);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 52, 56);
#endif
}


om_pte_0_hash(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 25, 25);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 62, 62);
#endif
}


om_pte_0_valid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return MASKED32(pte_0, 0, 0) != 0;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return MASKED64(pte_0, 63, 63) != 0;
#endif
}


om_ea_masked_page(unsigned_word ea)
{
  return MASKED(ea, 36, 51);
}


om_ea_masked_byte(unsigned_word ea)
{
  return MASKED(ea, 52, 63);
}


/* return the VSID aligned for pte group addr */

om_pte_0_masked_vsid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return INSERTED32(EXTRACTED32(pte_0, 1, 24), 31-6-24+1, 31-6);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return INSERTED64(EXTRACTED64(pte_0, 0, 51), 63-7-52+1, 63-7);
#endif
}


om_pte_1_pp(unsigned_word pte_1)
{
  return MASKED(pte_1, 62, 63); /*PP*/
}


om_pte_1_referenced(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 55, 55);
}


om_pte_1_changed(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 56, 56);
}


om_pte_1_masked_rpn(unsigned_word pte_1)
{
  return MASKED(pte_1, 0, 51); /*RPN*/
}


om_ea_api(unsigned_word ea)
{
  return EXTRACTED(ea, 36, 41);
}
/* Page and segment table read/write operators; these still need to
   account for the PPC's XOR addressing operation. */
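
/* When the simulated MSR[LE] setting disagrees with the byte order the
   memory image is stored in, addresses are munged rather than data
   swapped: an access of size n has its real address XORed with
   xor[n - 1] (7, 6, 4 and 0 for 1, 2, 4 and 8 byte accesses when
   WITH_XOR_ENDIAN is 8).  Sketch of the munge; xor_endian_fixup is a
   hypothetical helper name used only for illustration: */
#if 0
static unsigned_word
xor_endian_fixup(om_map *map, unsigned_word ra, int sizeof_access)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof_access - 1];
  return ra;
}
#endif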
om_read_word(om_map *map,
             unsigned_word ra,
             cpu *processor,
             unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  return core_map_read_word(map->physical, ra, processor, cia);
}


om_write_word(om_map *map,
              unsigned_word ra,
              unsigned_word val,
              cpu *processor,
              unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  core_map_write_word(map->physical, ra, val, processor, cia);
}
/* Bring things into existence */

vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
                - om_segment_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
                - om_page_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  virtual->physical = physical;
  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}
om_effective_to_bat(om_map *map,
                    unsigned_word ea)
{
  int curr_bat;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
        != bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}
(om_segment_tlb_entry *)
om_effective_to_virtual(om_map *map,
                        unsigned_word ea,
                        cpu *processor,
                        unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
                                             + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  TRACE(trace_vm, ("ea=0x%lx - sr[%ld] - masked-vsid=0x%lx va=0x%lx%07lx\n",
                   (unsigned long)ea,
                   (long)om_segment_tlb_index(ea),
                   (unsigned long)segment_tlb_entry->masked_virtual_segment_id,
                   (unsigned long)EXTRACTED32(segment_tlb_entry->masked_virtual_segment_id,
                                              31-6-24+1, 31-6),
                   (unsigned long)EXTRACTED32(ea, 4, 31)));
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  unsigned_word segment_id_hash = ea;
  int current_hash = 0;
  for (current_hash = 0; current_hash < 2; current_hash += 1) {
    unsigned_word segment_table_entry_group =
      (map->real_address_of_segment_table
       | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
    unsigned_word segment_table_entry;
    for (segment_table_entry = segment_table_entry_group;
         segment_table_entry < (segment_table_entry_group
                                + sizeof_segment_table_entry_group);
         segment_table_entry += sizeof_segment_table_entry) {
      unsigned_word segment_table_entry_dword_0 =
        om_read_word(map, segment_table_entry, processor, cia);
      unsigned_word segment_table_entry_dword_1 =
        om_read_word(map, segment_table_entry + 8,
                     processor, cia);
      int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
      unsigned_word masked_effective_segment_id =
        MASKED64(segment_table_entry_dword_0, 0, 35);
      if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
        /* don't permit some things */
        if (MASKED64(segment_table_entry_dword_0, 57, 57))
          error("om_effective_to_virtual() - T=1 in STE not supported\n");
        /* update segment tlb */
        segment_tlb_entry->is_valid = is_valid;
        segment_tlb_entry->masked_effective_segment_id =
          masked_effective_segment_id;
        segment_tlb_entry->key[om_supervisor_state] =
          EXTRACTED64(segment_table_entry_dword_0, 58, 58);
        segment_tlb_entry->key[om_problem_state] =
          EXTRACTED64(segment_table_entry_dword_0, 59, 59);
        segment_tlb_entry->invalid_access =
          (MASKED64(segment_table_entry_dword_0, 60, 60)
           ? om_instruction_read
           : om_access_any);
        segment_tlb_entry->masked_virtual_segment_id =
          INSERTED64(EXTRACTED64(segment_table_entry_dword_1, 0, 51),
                     18-13, 63-7); /* aligned ready for pte group addr */
        return segment_tlb_entry;
      }
    }
    segment_id_hash = ~segment_id_hash;
  }
  return NULL;
#endif
}
(om_page_tlb_entry *)
om_virtual_to_real(om_map *map,
                   unsigned_word ea,
                   om_segment_tlb_entry *segment_tlb_entry,
                   om_access_types access,
                   cpu *processor,
                   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
                                       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if ((page_tlb_entry->masked_virtual_segment_id
       == segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page
          == om_ea_masked_page(ea))) {
    TRACE(trace_vm, ("ea=0x%lx - tlb hit - tlb=0x%lx\n",
                     (long)ea, (long)page_tlb_entry));
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  unsigned_word page_hash =
    om_hash_page(segment_tlb_entry->masked_virtual_segment_id, ea);
  int current_hash;
  for (current_hash = 0; current_hash < 2; current_hash += 1) {
    unsigned_word real_address_of_pte_group =
      (map->real_address_of_page_table
       | (page_hash & map->page_table_hash_mask));
    unsigned_word real_address_of_pte_0;
    TRACE(trace_vm,
          ("ea=0x%lx - htab search %d - htab=0x%lx hash=0x%lx mask=0x%lx pteg=0x%lx\n",
           (long)ea, current_hash,
           map->real_address_of_page_table,
           page_hash,
           map->page_table_hash_mask,
           (long)real_address_of_pte_group));
    for (real_address_of_pte_0 = real_address_of_pte_group;
         real_address_of_pte_0 < (real_address_of_pte_group
                                  + sizeof_pte_group);
         real_address_of_pte_0 += sizeof_pte) {
      unsigned_word pte_0 = om_read_word(map,
                                         real_address_of_pte_0,
                                         processor, cia);
      if (om_pte_0_valid(pte_0)
          && (current_hash == om_pte_0_hash(pte_0))
          && (segment_tlb_entry->masked_virtual_segment_id
              == om_pte_0_masked_vsid(pte_0))
          && (om_ea_api(ea) == om_pte_0_api(pte_0))) {
        unsigned_word real_address_of_pte_1 = (real_address_of_pte_0
                                               + sizeof_pte / 2);
        unsigned_word pte_1 = om_read_word(map,
                                           real_address_of_pte_1,
                                           processor, cia);
        page_tlb_entry->protection = om_pte_1_pp(pte_1);
        page_tlb_entry->changed = om_pte_1_changed(pte_1);
        page_tlb_entry->masked_virtual_segment_id = segment_tlb_entry->masked_virtual_segment_id;
        page_tlb_entry->masked_page = om_ea_masked_page(ea);
        page_tlb_entry->masked_real_page_number = om_pte_1_masked_rpn(pte_1);
        page_tlb_entry->real_address_of_pte_1 = real_address_of_pte_1;
        if (!om_pte_1_referenced(pte_1)) {
          om_write_word(map,
                        real_address_of_pte_1,
                        pte_1 | BIT(55),
                        processor, cia);
          TRACE(trace_vm,
                ("ea=0x%lx - htab hit - set ref - tlb=0x%lx &pte1=0x%lx\n",
                 (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
        }
        else {
          TRACE(trace_vm,
                ("ea=0x%lx - htab hit - tlb=0x%lx &pte1=0x%lx\n",
                 (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
        }
        return page_tlb_entry;
      }
    }
    page_hash = ~page_hash; /*???*/
  }
  return NULL;
}
om_interrupt(cpu *processor,
             unsigned_word cia,
             unsigned_word ea,
             om_access_types access,
             storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("internal error - om_interrupt - unexpected access type %d", access);
  }
}
om_translate_effective_to_real(om_map *map,
                               unsigned_word ea,
                               om_access_types access,
                               cpu *processor,
                               unsigned_word cia,
                               int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("ea=0x%lx - direct map - ra=0x%lx\n",
                     (long)ea, (long)ra));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("ea=0x%lx - bat access violation\n", (long)ea));
      if (abort)
        om_interrupt(processor, cia, ea, access,
                     protection_violation_storage_interrupt);
      else
        return MASK(0, 63);
    }

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("ea=0x%lx - bat translation - ra=0x%lx\n",
                     (long)ea, (long)ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - segment tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif
  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("ea=0x%lx - segment access invalid\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
                                      access,
                                      processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
  if (!(om_valid_access
        [segment_tlb_entry->key[map->is_problem_state]]
        [page_tlb_entry->protection]
        [access])) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb access violation\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* update change bit as needed */
  if (access == om_data_write && !page_tlb_entry->changed) {
    unsigned_word pte_1 = om_read_word(map,
                                       page_tlb_entry->real_address_of_pte_1,
                                       processor, cia);
    om_write_word(map,
                  page_tlb_entry->real_address_of_pte_1,
                  pte_1 | BIT(56),
                  processor, cia);
    TRACE(trace_vm, ("ea=0x%lx - set change bit - tlb=0x%lx &pte1=0x%lx\n",
                     (long)ea, (long)page_tlb_entry,
                     (long)page_tlb_entry->real_address_of_pte_1));
  }

  ra = (page_tlb_entry->masked_real_page_number | om_ea_masked_byte(ea));
  TRACE(trace_vm, ("ea=0x%lx - page translation - ra=0x%lx\n",
                   (long)ea, (long)ra));
  return ra;
}
/*
 * Definition of operations for memory management
 */


/* rebuild all the relevant bat information */

om_unpack_bat(om_bat *bat,
              spreg ubat,
              spreg lbat)
{
  /* for extracting out the offset within a page */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}
/* rebuild the given bat table */

om_unpack_bats(om_bats *bats,
               spreg *raw_bats,
               msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 63, 63)
        : EXTRACTED(ubat, 62, 62)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
                    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}
#if (WITH_TARGET_WORD_BITSIZE == 32)

om_unpack_sr(vm *virtual,
             sreg *srs,
             int which_sr,
             cpu *processor,
             unsigned_word cia)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  ASSERT(which_sr >= 0 && which_sr < nr_om_segment_tlb_entries);

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    cpu_error(processor, cia, "unsupported value of T in segment register %d",
              which_sr);

  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
                                       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id =
    INSERTED32(EXTRACTED32(new_sr_value, 8, 31),
               31-6-24+1, 31-6); /* aligned ready for pte group addr */
}
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)

om_unpack_srs(vm *virtual,
              sreg *srs,
              cpu *processor,
              unsigned_word cia)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr,
                 processor, cia);
  }
}
#endif
/* Rebuild all the data structures for the new context as specified by
   the passed registers */

vm_synchronize_context(vm *virtual,
                       spreg *sprs,
                       sreg *srs,
                       msreg msr,
                       cpu *processor,
                       unsigned_word cia)
{
  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;
  int little_endian = (msr & msr_little_endian_mode) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = MASKED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(18+28-EXTRACTED64(sprs[spr_sdr1], 59, 63),
                                63-7);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = MASKED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = (INSERTED32(EXTRACTED32(sprs[spr_sdr1], 23, 31),
                                     7, 7+9-1)
                          | MASK32(7+9, 31-6));
#endif
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;

  /* unpack the segment tlb registers */
#if (WITH_TARGET_WORD_BITSIZE == 32)
  om_unpack_srs(virtual, srs,
                processor, cia);
#endif

  /* set up the XOR registers if the current endian mode conflicts
     with what is in the MSR */
  if (WITH_XOR_ENDIAN) {
    int i = 1;
    unsigned mask;
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER == LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN))
      mask = 0;
    else
      mask = WITH_XOR_ENDIAN - 1;
    while (i - 1 < WITH_XOR_ENDIAN) {
      virtual->instruction_map.translation.xor[i-1] = mask;
      virtual->data_map.translation.xor[i-1] = mask;
      mask = (mask << 1) & (WITH_XOR_ENDIAN - 1);
      i = i * 2;
    }
  }
  else {
    /* don't allow the processor to change endian modes */
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER != BIG_ENDIAN))
      cpu_error(processor, cia, "attempt to change hardwired byte order");
  }
}
/* update vm data structures due to a TLB operation */
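
/* For instance, an instruction model would map a tlbie-style operation
   onto vm_page_tlb_invalidate_entry() and a tlbia-style operation onto
   vm_page_tlb_invalidate_all().  Sketch only; do_tlbie is a
   hypothetical handler name, not part of this file: */
#if 0
static void
do_tlbie(vm *memory, unsigned_word ea)
{
  vm_page_tlb_invalidate_entry(memory, ea);
}
#endif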
vm_page_tlb_invalidate_entry(vm *memory,
                             unsigned_word ea)
{
  int i = om_page_tlb_index(ea);
  memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  TRACE(trace_vm, ("ea=0x%lx - tlb invalidate entry\n", (long)ea));
}
vm_page_tlb_invalidate_all(vm *memory)
{
  int i;
  for (i = 0; i < nr_om_page_tlb_entries; i++) {
    memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
    memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  }
  TRACE(trace_vm, ("tlb invalidate all\n"));
}
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}


(vm_instruction_map *)
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}
vm_translate(om_map *map,
             unsigned_word ea,
             om_access_types access,
             cpu *processor,
             unsigned_word cia,
             int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
                                          processor, cia,
                                          abort);
  default:
    error("internal error - vm_translate - bad switch");
  }
}
vm_real_data_addr(vm_data_map *map,
                  unsigned_word ea,
                  int is_read,
                  cpu *processor,
                  unsigned_word cia)
{
  return vm_translate(&map->translation,
                      ea,
                      is_read ? om_data_read : om_data_write,
                      processor,
                      cia,
                      1); /*abort*/
}
vm_real_instruction_addr(vm_instruction_map *map,
                         cpu *processor,
                         unsigned_word cia)
{
  return vm_translate(&map->translation,
                      cia,
                      om_instruction_read,
                      processor,
                      cia,
                      1); /*abort*/
}
vm_instruction_map_read(vm_instruction_map *map,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  if (WITH_XOR_ENDIAN)
    ra ^= map->translation.xor[sizeof(instruction_word) - 1];
  return core_map_read_4(map->code, ra, processor, cia);
}
vm_data_map_read_buffer(vm_data_map *map,
                        void *target,
                        unsigned_word addr,
                        unsigned nr_bytes,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_read,
                                    processor, /*processor*/
                                    cia, /*cia*/
                                    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
        != sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}
vm_data_map_write_buffer(vm_data_map *map,
                         const void *source,
                         unsigned_word addr,
                         unsigned nr_bytes,
                         int violate_read_only_section,
                         cpu *processor,
                         unsigned_word cia)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_write,
                                    processor,
                                    cia,
                                    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
                               ? map->read
                               : map->write),
                              &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}
/* define the read/write 1/2/4/8/word functions */