/* This file is part of the program psim.

   Copyright (C) 1994-1997, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.  */
#ifndef _VM_C_
#define _VM_C_
#if 0
#include "basics.h"
#include "registers.h"
#include "device.h"
#include "corefile.h"
#include "vm.h"
#include "interrupts.h"
#include "mon.h"
#endif

#include "cpu.h"
/* OEA vs VEA

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces, so that writes to instruction space and
   execution of data space are prevented.

   For the OEA model things are more complex.  The reason for separate
   instruction and data models becomes crucial.  The OEA model is
   built out of three parts.  An instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory. */
/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between OEA registers and this model's internal data
   structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for BAT registers are rebuilt
   whenever the processor moves between problem and system states.

   Unpacked values are stored in the OEA so that they correctly align
   to where they will be needed by the PTE address. */
/* Protection table:

   Matrix of processor state, type of access and validity */

typedef enum {
  om_supervisor_state,
  om_problem_state,
  nr_om_modes
} om_processor_modes;

typedef enum {
  om_data_read, om_data_write,
  om_instruction_read, om_access_any,
  nr_om_access_types
} om_access_types;
static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  { /*r  w  i  a       pp */
    { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1 or P bit valid */
  { /*r  w  i  a       pp */
    { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
};
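/* For example, with the key bit set for the current state, a store
   to a page whose pp bits are 01 indexes
   om_valid_access[1][1][om_data_write], which is 0 - the access is
   rejected and the callers below raise a protection violation.  */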
/* Bat translation:

   The bat data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;
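/* A sketch of how a BAT entry is applied (see
   om_translate_effective_to_real() below): assuming a 128KB block
   (BL field zero), block_length_mask covers the low 17 bits and

     ra = (ea & bat->block_length_mask) | bat->block_real_page_number;

   while the complement of the mask selects the effective page index
   bits that must match for the BAT to hit at all.  */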
/* Segment TLB:

   In this model the 32 and 64 bit segment tables are treated in very
   similar ways.  The 32 bit segment registers are treated as a
   simplification of the 64 bit segment tlb */

enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};
typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id; /* aligned ready for pte group addr */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;
/* Page TLB:

   This OEA model includes a small direct map Page TLB.  The TLB cuts
   down on the need for the OEA model to perform walks of the page
   hash table. */
enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};
typedef struct _om_page_tlb_entry {
  int protection;
  int changed;
  unsigned_word real_address_of_pte_1;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;
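/* Note that the page TLB is direct mapped: an entry is selected
   purely by effective-address bits 46..51, the low bits of the page
   number (om_page_tlb_index below).  Two pages a multiple of 64
   pages apart - for instance ea 0x3000 and ea 0x43000 on a 32 bit
   target - therefore share a slot and evict each other.  */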
/* memory translation:

   OEA memory translation possibly involves BAT, SR, TLB and HTAB
   information */

typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

  /* address xor for PPC endian */
  unsigned xor[WITH_XOR_ENDIAN];

} om_map;
/* VM objects:

   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};
/* VM:

   Underlying memory object.  For the VEA this is just the core_map.
   For the OEA it is the instruction and data memory translations */
struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  /* real memory */
  core *physical;

  /* memory maps */
  vm_instruction_map instruction_map;
  vm_data_map data_map;

};
/* OEA Support procedures */

STATIC_INLINE_VM\
(unsigned_word)
om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_segment_tlb_index_start_bit,
                                  om_segment_tlb_index_stop_bit);
  return index;
}
STATIC_INLINE_VM\
(unsigned_word)
om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_page_tlb_index_start_bit,
                                  om_page_tlb_index_stop_bit);
  return index;
}
STATIC_INLINE_VM\
(unsigned_word)
om_hash_page(unsigned_word masked_vsid,
             unsigned_word ea)
{
  unsigned_word extracted_ea = EXTRACTED(ea, 36, 51);
#if (WITH_TARGET_WORD_BITSIZE == 32)
  unsigned_word masked_ea = INSERTED32(extracted_ea, 7, 31-6);
  unsigned_word hash = masked_vsid ^ masked_ea;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word masked_ea = INSERTED64(extracted_ea, 18, 63-7);
  unsigned_word hash = masked_vsid ^ masked_ea;
#endif
  TRACE(trace_vm, ("ea=0x%lx - masked-vsid=0x%lx masked-ea=0x%lx hash=0x%lx\n",
                   (unsigned long)ea,
                   (unsigned long)masked_vsid,
                   (unsigned long)masked_ea,
                   (unsigned long)hash));
  return hash;
}
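/* Both operands of the XOR above arrive pre-shifted left by the log2
   of the PTE group size (6 bits on a 32 bit target, 7 on a 64 bit
   one), so the resulting hash can be masked and ORed directly onto
   the page table base to form a PTE group address - see
   om_virtual_to_real() below.  */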
STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_api(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 26, 31);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 52, 56);
#endif
}
STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_hash(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 25, 25);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 62, 62);
#endif
}
STATIC_INLINE_VM\
(int)
om_pte_0_valid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return MASKED32(pte_0, 0, 0) != 0;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return MASKED64(pte_0, 63, 63) != 0;
#endif
}
STATIC_INLINE_VM\
(unsigned_word)
om_ea_masked_page(unsigned_word ea)
{
  return MASKED(ea, 36, 51);
}

STATIC_INLINE_VM\
(unsigned_word)
om_ea_masked_byte(unsigned_word ea)
{
  return MASKED(ea, 52, 63);
}
/* return the VSID aligned for pte group addr */
STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_masked_vsid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return INSERTED32(EXTRACTED32(pte_0, 1, 24), 31-6-24+1, 31-6);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return INSERTED64(EXTRACTED64(pte_0, 0, 51), 63-7-52+1, 63-7);
#endif
}
STATIC_INLINE_VM\
(unsigned_word)
om_pte_1_pp(unsigned_word pte_1)
{
  return MASKED(pte_1, 62, 63); /*PP*/
}

STATIC_INLINE_VM\
(int)
om_pte_1_referenced(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 55, 55);
}

STATIC_INLINE_VM\
(int)
om_pte_1_changed(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 56, 56);
}

/* the masked RPN occupies the high bits of the word, so return it as
   an unsigned_word rather than an int to avoid truncation */
STATIC_INLINE_VM\
(unsigned_word)
om_pte_1_masked_rpn(unsigned_word pte_1)
{
  return MASKED(pte_1, 0, 51); /*RPN*/
}

STATIC_INLINE_VM\
(unsigned_word)
om_ea_api(unsigned_word ea)
{
  return EXTRACTED(ea, 36, 41);
}
/* Page and Segment table read/write operators.  These still need to
   account for the PPC's XOR endian operation */

STATIC_INLINE_VM\
(unsigned_word)
om_read_word(om_map *map,
             unsigned_word ra,
             cpu *processor,
             unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  return core_map_read_word(map->physical, ra, processor, cia);
}
STATIC_INLINE_VM\
(void)
om_write_word(om_map *map,
              unsigned_word ra,
              unsigned_word val,
              cpu *processor,
              unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  core_map_write_word(map->physical, ra, val, processor, cia);
}
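/* Note that table reads and writes use the word-sized XOR adjustment
   (xor[sizeof(instruction_word) - 1]) so that, under xor-endian,
   page and segment table entries are fetched from the same real
   addresses an instruction fetch would use.  */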
/* Bring things into existence */

INLINE_VM\
(vm *)
vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
                - om_segment_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
                - om_page_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  /* set up core */
  virtual->physical = physical;

  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}
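/* A usage sketch (illustrative only - the simulator's own start-up
   code lives elsewhere):

     vm *virtual = vm_create(physical);
     vm_instruction_map *imap = vm_create_instruction_map(virtual);
     vm_data_map *dmap = vm_create_data_map(virtual);

   where `physical' is an already created core object; the cpu then
   fetches through imap and loads/stores through dmap.  */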
STATIC_INLINE_VM\
(om_bat *)
om_effective_to_bat(om_map *map,
                    unsigned_word ea)
{
  int curr_bat = 0;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
        != bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}
STATIC_INLINE_VM\
(om_segment_tlb_entry *)
om_effective_to_virtual(om_map *map,
                        unsigned_word ea,
                        cpu *processor,
                        unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
                                             + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  TRACE(trace_vm, ("ea=0x%lx - sr[%ld] - masked-vsid=0x%lx va=0x%lx%07lx\n",
                   (unsigned long)ea,
                   (long)om_segment_tlb_index(ea),
                   (unsigned long)segment_tlb_entry->masked_virtual_segment_id,
                   (unsigned long)EXTRACTED32(segment_tlb_entry->masked_virtual_segment_id, 31-6-24+1, 31-6),
                   (unsigned long)EXTRACTED32(ea, 4, 31)));
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  {
    unsigned_word segment_id_hash = ea;
    int current_hash = 0;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
        (map->real_address_of_segment_table
         | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
           segment_table_entry < (segment_table_entry_group
                                  + sizeof_segment_table_entry_group);
           segment_table_entry += sizeof_segment_table_entry) {
        /* byte order? */
        unsigned_word segment_table_entry_dword_0 =
          om_read_word(map, segment_table_entry, processor, cia);
        unsigned_word segment_table_entry_dword_1 =
          om_read_word(map, segment_table_entry + 8,
                       processor, cia);
        int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
        unsigned_word masked_effective_segment_id =
          MASKED64(segment_table_entry_dword_0, 0, 35);
        if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
          /* don't permit some things */
          if (MASKED64(segment_table_entry_dword_0, 57, 57))
            error("om_effective_to_virtual() - T=1 in STE not supported\n");
          /* update segment tlb */
          segment_tlb_entry->is_valid = is_valid;
          segment_tlb_entry->masked_effective_segment_id =
            masked_effective_segment_id;
          segment_tlb_entry->key[om_supervisor_state] =
            EXTRACTED64(segment_table_entry_dword_0, 58, 58);
          segment_tlb_entry->key[om_problem_state] =
            EXTRACTED64(segment_table_entry_dword_0, 59, 59);
          segment_tlb_entry->invalid_access =
            (MASKED64(segment_table_entry_dword_0, 60, 60)
             ? om_instruction_read
             : om_access_any);
          segment_tlb_entry->masked_virtual_segment_id =
            INSERTED64(EXTRACTED64(segment_table_entry_dword_1, 0, 51),
                       18-13, 63-7); /* aligned ready for pte group addr */
          return segment_tlb_entry;
        }
      }
      segment_id_hash = ~segment_id_hash;
    }
  }
  return NULL;
#endif
}
STATIC_INLINE_VM\
(om_page_tlb_entry *)
om_virtual_to_real(om_map *map,
                   unsigned_word ea,
                   om_segment_tlb_entry *segment_tlb_entry,
                   om_access_types access,
                   cpu *processor,
                   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
                                       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if ((page_tlb_entry->masked_virtual_segment_id
       == segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page
          == om_ea_masked_page(ea))) {
    TRACE(trace_vm, ("ea=0x%lx - tlb hit - tlb=0x%lx\n",
                     (long)ea, (long)page_tlb_entry));
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  {
    unsigned_word page_hash =
      om_hash_page(segment_tlb_entry->masked_virtual_segment_id, ea);
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
        (map->real_address_of_page_table
         | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte_0;
      TRACE(trace_vm,
            ("ea=0x%lx - htab search %d - htab=0x%lx hash=0x%lx mask=0x%lx pteg=0x%lx\n",
             (long)ea, current_hash,
             map->real_address_of_page_table,
             page_hash,
             map->page_table_hash_mask,
             (long)real_address_of_pte_group));
      for (real_address_of_pte_0 = real_address_of_pte_group;
           real_address_of_pte_0 < (real_address_of_pte_group
                                    + sizeof_pte_group);
           real_address_of_pte_0 += sizeof_pte) {
        unsigned_word pte_0 = om_read_word(map,
                                           real_address_of_pte_0,
                                           processor, cia);
        /* did we hit? */
        if (om_pte_0_valid(pte_0)
            && (current_hash == om_pte_0_hash(pte_0))
            && (segment_tlb_entry->masked_virtual_segment_id
                == om_pte_0_masked_vsid(pte_0))
            && (om_ea_api(ea) == om_pte_0_api(pte_0))) {
          unsigned_word real_address_of_pte_1 = (real_address_of_pte_0
                                                 + sizeof_pte / 2);
          unsigned_word pte_1 = om_read_word(map,
                                             real_address_of_pte_1,
                                             processor, cia);
          page_tlb_entry->protection = om_pte_1_pp(pte_1);
          page_tlb_entry->changed = om_pte_1_changed(pte_1);
          page_tlb_entry->masked_virtual_segment_id =
            segment_tlb_entry->masked_virtual_segment_id;
          page_tlb_entry->masked_page = om_ea_masked_page(ea);
          page_tlb_entry->masked_real_page_number = om_pte_1_masked_rpn(pte_1);
          page_tlb_entry->real_address_of_pte_1 = real_address_of_pte_1;
          if (!om_pte_1_referenced(pte_1)) {
            om_write_word(map,
                          real_address_of_pte_1,
                          pte_1 | BIT(55),
                          processor, cia);
            TRACE(trace_vm,
                  ("ea=0x%lx - htab hit - set ref - tlb=0x%lx &pte1=0x%lx\n",
                   (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
          }
          else {
            TRACE(trace_vm,
                  ("ea=0x%lx - htab hit - tlb=0x%lx &pte1=0x%lx\n",
                   (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
          }
          return page_tlb_entry;
        }
      }
      page_hash = ~page_hash; /*???*/
    }
  }
  return NULL;
}
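/* The two passes above are the architecture's primary and secondary
   hash table probes; the secondary hash is the one's complement of
   the primary (the `page_hash = ~page_hash' step), and comparing
   om_pte_0_hash() against current_hash ensures a PTE is only
   accepted from the probe it was entered under.  */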
STATIC_INLINE_VM\
(void)
om_interrupt(cpu *processor,
             unsigned_word cia,
             unsigned_word ea,
             om_access_types access,
             storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("internal error - om_interrupt - unexpected access type %d", access);
  }
}
STATIC_INLINE_VM\
(unsigned_word)
om_translate_effective_to_real(om_map *map,
                               unsigned_word ea,
                               om_access_types access,
                               cpu *processor,
                               unsigned_word cia,
                               int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("ea=0x%lx - direct map - ra=0x%lx\n",
                     (long)ea, (long)ra));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("ea=0x%lx - bat access violation\n", (long)ea));
      if (abort)
        om_interrupt(processor, cia, ea, access,
                     protection_violation_storage_interrupt);
      else
        return MASK(0, 63);
    }

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("ea=0x%lx - bat translation - ra=0x%lx\n",
                     (long)ea, (long)ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - segment tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif
  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("ea=0x%lx - segment access invalid\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* lookup in PTE */
  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
                                      access,
                                      processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
  if (!(om_valid_access
        [segment_tlb_entry->key[map->is_problem_state]]
        [page_tlb_entry->protection]
        [access])) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb access violation\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* update change bit as needed */
  if (access == om_data_write && !page_tlb_entry->changed) {
    unsigned_word pte_1 = om_read_word(map,
                                       page_tlb_entry->real_address_of_pte_1,
                                       processor, cia);
    om_write_word(map,
                  page_tlb_entry->real_address_of_pte_1,
                  pte_1 | BIT(56),
                  processor, cia);
    TRACE(trace_vm, ("ea=0x%lx - set change bit - tlb=0x%lx &pte1=0x%lx\n",
                     (long)ea, (long)page_tlb_entry,
                     (long)page_tlb_entry->real_address_of_pte_1));
  }

  ra = (page_tlb_entry->masked_real_page_number | om_ea_masked_byte(ea));
  TRACE(trace_vm, ("ea=0x%lx - page translation - ra=0x%lx\n",
                   (long)ea, (long)ra));
  return ra;
}
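/* In summary, the translation above proceeds as:

     1. relocation disabled -> ra == ea (direct map);
     2. BAT hit             -> block translation, checked against
                               om_valid_access[1][pp][access];
     3. otherwise           -> ea to va via the segment tlb, then va
                               to ra via the page tlb / hash table,
                               with key/pp protection plus R and C
                               bit updates.

   On failure the appropriate storage interrupt is raised when abort
   is non-zero, otherwise MASK(0, 63) (all ones) is returned as a
   can't-translate marker.  */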
/*
 * Definition of operations for memory management
 */
/* rebuild all the relevant bat information */
STATIC_INLINE_VM\
(void)
om_unpack_bat(om_bat *bat,
              spreg ubat,
              spreg lbat)
{
  /* for extracting out the offset within a page */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}
/* rebuild the given bat table */
STATIC_INLINE_VM\
(void)
om_unpack_bats(om_bats *bats,
               spreg *raw_bats,
               msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 63, 63)
        : EXTRACTED(ubat, 62, 62)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
                    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}
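/* ubat bit 62 is the supervisor-state valid bit (Vs) and bit 63 the
   problem-state valid bit (Vp), so only the BATs valid for the
   privilege level in the supplied MSR are unpacked - one reason the
   tables must be rebuilt at each context synchronization point.  */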
#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM\
(void)
om_unpack_sr(vm *virtual,
             sreg *srs,
             int which_sr,
             cpu *processor,
             unsigned_word cia)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  ASSERT(which_sr >= 0 && which_sr < nr_om_segment_tlb_entries);

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this? */
  if (MASKED32(new_sr_value, 0, 0))
    cpu_error(processor, cia, "unsupported value of T in segment register %d",
              which_sr);

  /* update info */
  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
                                       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id =
    INSERTED32(EXTRACTED32(new_sr_value, 8, 31),
               31-6-24+1, 31-6); /* aligned ready for pte group addr */
}
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM\
(void)
om_unpack_srs(vm *virtual,
              sreg *srs,
              cpu *processor,
              unsigned_word cia)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr,
                 processor, cia);
  }
}
#endif
/* Rebuild all the data structures for the new context as specified by
   the passed registers */
INLINE_VM\
(void)
vm_synchronize_context(vm *virtual,
                       spreg *sprs,
                       sreg *srs,
                       msreg msr,
                       /**/
                       cpu *processor,
                       unsigned_word cia)
{

  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;
  int little_endian = (msr & msr_little_endian_mode) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = MASKED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(18+28-EXTRACTED64(sprs[spr_sdr1], 59, 63),
                                63-7);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = MASKED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = (INSERTED32(EXTRACTED32(sprs[spr_sdr1], 23, 31),
                                     7, 7+9-1)
                          | MASK32(7+9, 31-6));
#endif
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;

  /* unpack the segment tlb registers */
#if (WITH_TARGET_WORD_BITSIZE == 32)
  om_unpack_srs(virtual, srs,
                processor, cia);
#endif

  /* set up the XOR registers if the current endian mode conflicts
     with what is in the MSR */
  if (WITH_XOR_ENDIAN) {
    int i = 1;
    unsigned mask;
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER == LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN))
      mask = 0;
    else
      mask = WITH_XOR_ENDIAN - 1;
    while (i - 1 < WITH_XOR_ENDIAN) {
      virtual->instruction_map.translation.xor[i-1] = mask;
      virtual->data_map.translation.xor[i-1] = mask;
      mask = (mask << 1) & (WITH_XOR_ENDIAN - 1);
      i = i * 2;
    }
  }
  else {
    /* don't allow the processor to change endian modes */
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER != BIG_ENDIAN))
      cpu_error(processor, cia, "attempt to change hardwired byte order");
  }
}
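/* As a worked example, with WITH_XOR_ENDIAN == 8 and conflicting
   endian modes the loop above leaves xor[0] = 7, xor[1] = 6,
   xor[3] = 4 and xor[7] = 0: a 1 byte access has its low three
   address bits flipped, a 2 byte access bits 1 and 2, a 4 byte
   access bit 2 only, and an 8 byte access none - the usual
   xor-endian address munging.  */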
/* update vm data structures due to a TLB operation */

INLINE_VM\
(void)
vm_page_tlb_invalidate_entry(vm *memory,
                             unsigned_word ea)
{
  int i = om_page_tlb_index(ea);
  memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  TRACE(trace_vm, ("ea=0x%lx - tlb invalidate entry\n", (long)ea));
}
INLINE_VM\
(void)
vm_page_tlb_invalidate_all(vm *memory)
{
  int i;
  for (i = 0; i < nr_om_page_tlb_entries; i++) {
    memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
    memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  }
  TRACE(trace_vm, ("tlb invalidate all\n"));
}
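/* Invalidation works by storing an impossible tag: a real
   masked_virtual_segment_id is aligned with its low bits clear, so
   the all-ones MASK(0, 63) can never match a lookup and the next
   access through the slot is forced back to the hash table.  */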
INLINE_VM\
(vm_data_map *)
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}

INLINE_VM\
(vm_instruction_map *)
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}
STATIC_INLINE_VM\
(unsigned_word)
vm_translate(om_map *map,
             unsigned_word ea,
             om_access_types access,
             cpu *processor,
             unsigned_word cia,
             int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
                                          processor, cia,
                                          abort);
  default:
    error("internal error - vm_translate - bad switch");
    return 0;
  }
}
INLINE_VM\
(unsigned_word)
vm_real_data_addr(vm_data_map *map,
                  unsigned_word ea,
                  int is_read,
                  cpu *processor,
                  unsigned_word cia)
{
  return vm_translate(&map->translation,
                      ea,
                      is_read ? om_data_read : om_data_write,
                      processor,
                      cia,
                      1); /*abort*/
}
INLINE_VM\
(unsigned_word)
vm_real_instruction_addr(vm_instruction_map *map,
                         cpu *processor,
                         unsigned_word cia)
{
  return vm_translate(&map->translation,
                      cia,
                      om_instruction_read,
                      processor,
                      cia,
                      1); /*abort*/
}
INLINE_VM\
(instruction_word)
vm_instruction_map_read(vm_instruction_map *map,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  if (WITH_XOR_ENDIAN)
    ra ^= map->translation.xor[sizeof(instruction_word) - 1];
  return core_map_read_4(map->code, ra, processor, cia);
}
INLINE_VM\
(int)
vm_data_map_read_buffer(vm_data_map *map,
                        void *target,
                        unsigned_word addr,
                        unsigned nr_bytes,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_read,
                                    processor, /*processor*/
                                    cia, /*cia*/
                                    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
        != sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}
INLINE_VM\
(int)
vm_data_map_write_buffer(vm_data_map *map,
                         const void *source,
                         unsigned_word addr,
                         unsigned nr_bytes,
                         int violate_read_only_section,
                         cpu *processor,
                         unsigned_word cia)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_write,
                                    processor,
                                    cia,
                                    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
                               ? map->read
                               : map->write),
                              &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}
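/* Both buffer routines deliberately move one byte at a time so that
   every byte is translated and protection checked individually.  A
   failed translation (ra == MASK(0, 63), possible only when no
   processor is supplied and aborts are therefore suppressed) or a
   short core access stops the copy, and the number of bytes actually
   transferred is returned.  */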
/* define the read/write 1/2/4/8/word functions */

#define N 1
#include "vm_n.h"
#undef N

#define N 2
#include "vm_n.h"
#undef N

#define N 4
#include "vm_n.h"
#undef N

#define N 8
#include "vm_n.h"
#undef N

#define N word
#include "vm_n.h"
#undef N
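/* Each inclusion of vm_n.h with N defined is a template expansion;
   it is expected to provide the size-specific access functions (the
   vm_data_map_read_N / vm_data_map_write_N style operations declared
   in vm.h) for 1, 2, 4 and 8 byte and target-word accesses.  */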
#endif /* _VM_C_ */