/*
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#include <KernelExport.h>

#include <boot/kernel_args.h>

#include <vm/VMAddressSpace.h>
//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
29 arch_vm_init(kernel_args
*args
)
36 arch_vm_init2(kernel_args
*args
)
42 // print out any bat mappings
45 for(i
= 0; i
< 4; i
++)
46 dprintf("0x%x 0x%x\n", bats
[i
*2], bats
[i
*2+1]);
49 for(i
= 0; i
< 4; i
++)
50 dprintf("0x%x 0x%x\n", bats
[i
*2], bats
[i
*2+1]);
54 // turn off the first 2 BAT mappings (3 & 4 are used by the lower level code)
55 block_address_translation bat
;
63 memset(bats, 0, 2 * 2);
66 memset(bats, 0, 2 * 2);
71 // just clear the first BAT mapping (0 - 256MB)
72 dprintf("msr 0x%x\n", getmsr());
75 asm("mr %0,1" : "=r"(reg
));
76 dprintf("sp 0x%x\n", reg
);
78 dprintf("ka %p\n", ka
);
82 for(i
= 0; i
< 4; i
++)
83 dprintf("0x%x 0x%x\n", bats
[i
*2], bats
[i
*2+1]);
84 bats
[0] = bats
[1] = 0;
88 for(i
= 0; i
< 4; i
++)
89 dprintf("0x%x 0x%x\n", bats
[i
*2], bats
[i
*2+1]);
90 bats
[0] = bats
[1] = 0;
98 arch_vm_init_post_area(kernel_args
*args
)
105 arch_vm_init_end(kernel_args
*args
)
107 TRACE(("arch_vm_init_end(): %lu virtual ranges to keep:\n",
108 args
->arch_args
.num_virtual_ranges_to_keep
));
110 for (int i
= 0; i
< (int)args
->arch_args
.num_virtual_ranges_to_keep
; i
++) {
111 addr_range
&range
= args
->arch_args
.virtual_ranges_to_keep
[i
];
113 TRACE((" start: %p, size: 0x%lx\n", (void*)range
.start
, range
.size
));
115 // skip ranges outside the kernel address space
116 if (!IS_KERNEL_ADDRESS(range
.start
)) {
117 TRACE((" no kernel address, skipping...\n"));
121 phys_addr_t physicalAddress
;
122 void *address
= (void*)range
.start
;
123 if (vm_get_page_mapping(VMAddressSpace::KernelID(), range
.start
,
124 &physicalAddress
) != B_OK
)
125 panic("arch_vm_init_end(): No page mapping for %p\n", address
);
126 area_id area
= vm_map_physical_memory(VMAddressSpace::KernelID(),
127 "boot loader reserved area", &address
,
128 B_EXACT_ADDRESS
, range
.size
,
129 B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
,
130 physicalAddress
, true);
132 panic("arch_vm_init_end(): Failed to create area for boot loader "
133 "reserved area: %p - %p\n", (void*)range
.start
,
134 (void*)(range
.start
+ range
.size
));
138 // Throw away any address space mappings we've inherited from the boot
139 // loader and have not yet turned into an area.
140 vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE
+ 1);
147 arch_vm_init_post_modules(kernel_args
*args
)
/*!	Architecture hook invoked when switching between address spaces.
	No per-architecture work is required on PowerPC, so this is a no-op.
	\param from the address space being switched away from
	\param to the address space being switched to
	NOTE(review): empty body reconstructed — not visible in this chunk;
	confirm against upstream.
*/
void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
}
160 arch_vm_supports_protection(uint32 protection
)
167 arch_vm_unset_memory_type(VMArea
*area
)
173 arch_vm_set_memory_type(VMArea
*area
, phys_addr_t physicalBase
, uint32 type
)