headers/bsd: Add sys/queue.h.
[haiku.git] / src / system / kernel / arch / ppc / arch_vm.cpp
blob39296034a55ce4b262bf1f859af770bbc318dcca
1 /*
2 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
3 * Distributed under the terms of the MIT License.
5 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
6 * Distributed under the terms of the NewOS License.
7 */
9 #include <KernelExport.h>
11 #include <kernel.h>
12 #include <boot/kernel_args.h>
14 #include <vm/vm.h>
15 #include <vm/VMAddressSpace.h>
16 #include <arch/vm.h>
17 #include <arch_mmu.h>
//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
	// Expand to a real empty statement instead of a bare ';' so that
	// "if (cond) TRACE((...)); else ..." keeps its if/else pairing intact
	// when tracing is compiled out.
#	define TRACE(x) do {} while (0)
#endif
28 status_t
29 arch_vm_init(kernel_args *args)
31 return B_OK;
35 status_t
36 arch_vm_init2(kernel_args *args)
38 // int bats[8];
39 // int i;
41 #if 0
42 // print out any bat mappings
43 getibats(bats);
44 dprintf("ibats:\n");
45 for(i = 0; i < 4; i++)
46 dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
47 getdbats(bats);
48 dprintf("dbats:\n");
49 for(i = 0; i < 4; i++)
50 dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
51 #endif
53 #if 1
54 // turn off the first 2 BAT mappings (3 & 4 are used by the lower level code)
55 block_address_translation bat;
56 bat.Clear();
58 set_ibat0(&bat);
59 set_ibat1(&bat);
60 set_dbat0(&bat);
61 set_dbat1(&bat);
62 /* getibats(bats);
63 memset(bats, 0, 2 * 2);
64 setibats(bats);
65 getdbats(bats);
66 memset(bats, 0, 2 * 2);
67 setdbats(bats);
69 #endif
70 #if 0
71 // just clear the first BAT mapping (0 - 256MB)
72 dprintf("msr 0x%x\n", getmsr());
74 unsigned int reg;
75 asm("mr %0,1" : "=r"(reg));
76 dprintf("sp 0x%x\n", reg);
78 dprintf("ka %p\n", ka);
80 getibats(bats);
81 dprintf("ibats:\n");
82 for(i = 0; i < 4; i++)
83 dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
84 bats[0] = bats[1] = 0;
85 setibats(bats);
86 getdbats(bats);
87 dprintf("dbats:\n");
88 for(i = 0; i < 4; i++)
89 dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
90 bats[0] = bats[1] = 0;
91 setdbats(bats);
92 #endif
93 return B_OK;
97 status_t
98 arch_vm_init_post_area(kernel_args *args)
100 return B_OK;
104 status_t
105 arch_vm_init_end(kernel_args *args)
107 TRACE(("arch_vm_init_end(): %lu virtual ranges to keep:\n",
108 args->arch_args.num_virtual_ranges_to_keep));
110 for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
111 addr_range &range = args->arch_args.virtual_ranges_to_keep[i];
113 TRACE((" start: %p, size: 0x%lx\n", (void*)range.start, range.size));
115 // skip ranges outside the kernel address space
116 if (!IS_KERNEL_ADDRESS(range.start)) {
117 TRACE((" no kernel address, skipping...\n"));
118 continue;
121 phys_addr_t physicalAddress;
122 void *address = (void*)range.start;
123 if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
124 &physicalAddress) != B_OK)
125 panic("arch_vm_init_end(): No page mapping for %p\n", address);
126 area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
127 "boot loader reserved area", &address,
128 B_EXACT_ADDRESS, range.size,
129 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
130 physicalAddress, true);
131 if (area < 0) {
132 panic("arch_vm_init_end(): Failed to create area for boot loader "
133 "reserved area: %p - %p\n", (void*)range.start,
134 (void*)(range.start + range.size));
138 // Throw away any address space mappings we've inherited from the boot
139 // loader and have not yet turned into an area.
140 vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
142 return B_OK;
146 status_t
147 arch_vm_init_post_modules(kernel_args *args)
149 return B_OK;
void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// Intentionally empty: no extra architecture work is done here when
	// switching address spaces on PPC.
}
159 bool
160 arch_vm_supports_protection(uint32 protection)
162 return true;
166 void
167 arch_vm_unset_memory_type(VMArea *area)
172 status_t
173 arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
175 if (type == 0)
176 return B_OK;
178 return B_OK;