/*
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <KernelExport.h>

#include <arch_platform.h>
#include <arch/thread.h>
#include <boot/kernel_args.h>


static bool sHasTlbia;

status_t
arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// enable the FPU
	set_msr(get_msr() | MSR_FP_AVAILABLE);

	// The current thread must be NULL for all CPUs till we have threads.
	// Some boot code relies on this.
	arch_thread_set_current_thread(NULL);

	return B_OK;
}

status_t
arch_cpu_init(kernel_args *args)
{
	// TODO: Let the boot loader put that info into the kernel args
	// (property "tlbia" in the CPU node).
	sHasTlbia = false;

	return B_OK;
}

status_t
arch_cpu_init_post_vm(kernel_args *args)
{
	return B_OK;
}

status_t
arch_cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	//detect_cpu(curr_cpu);

	// we only support one CPU on PPC anyway at the moment...
	return 0;
}

status_t
arch_cpu_init_post_modules(kernel_args *args)
{
	return B_OK;
}

#define CACHELINE 32

void
arch_cpu_sync_icache(void *address, size_t len)
{
	int l, off;
	char *p;

	// extend the range to cache line boundaries
	off = (unsigned int)address & (CACHELINE - 1);
	len += off;

	// write the modified data cache lines back to memory...
	l = len;
	p = (char *)address - off;
	do {
		asm volatile ("dcbst 0,%0" :: "r"(p));
		p += CACHELINE;
	} while ((l -= CACHELINE) > 0);
	asm volatile ("sync");

	// ...and invalidate the corresponding instruction cache lines
	p = (char *)address - off;
	do {
		asm volatile ("icbi 0,%0" :: "r"(p));
		p += CACHELINE;
	} while ((len -= CACHELINE) > 0);
	asm volatile ("sync");
	asm volatile ("isync");
}
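
// Usage sketch (not part of this file): a typical caller flushes the
// instruction cache after writing instructions into memory, for instance
// after installing a breakpoint; "patchAddress" and "trapInstruction" are
// hypothetical names used only for illustration:
//
//	memcpy(patchAddress, &trapInstruction, sizeof(trapInstruction));
//	arch_cpu_sync_icache(patchAddress, sizeof(trapInstruction));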

void
arch_cpu_memory_read_barrier(void)
{
	// WARNING PPC: is it model-dependent?
	asm volatile ("lwsync");
}

void
arch_cpu_memory_write_barrier(void)
{
	// WARNING PPC: is it model-dependent?
	asm volatile ("isync");
	asm volatile ("eieio");
}
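
// Usage sketch (not part of this file): a lock-free producer would typically
// publish its payload before setting the flag a consumer polls, placing the
// write barrier between the two stores, while the consumer issues the read
// barrier after it sees the flag; "sharedData" and "dataReady" are
// hypothetical variables used only for illustration:
//
//	sharedData = value;
//	arch_cpu_memory_write_barrier();
//	dataReady = true;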

void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
	// invalidate each page in [start, end), one tlbie at a time
	asm volatile("sync");
	while (start < end) {
		asm volatile("tlbie %0" :: "r" (start));
		asm volatile("eieio");
		asm volatile("sync");
		start += B_PAGE_SIZE;
	}
	asm volatile("tlbsync");
	asm volatile("sync");
}

void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
	int i;

	asm volatile("sync");
	for (i = 0; i < num_pages; i++) {
		asm volatile("tlbie %0" :: "r" (pages[i]));
		asm volatile("eieio");
		asm volatile("sync");
	}
	asm volatile("tlbsync");
	asm volatile("sync");
}

void
arch_cpu_global_TLB_invalidate(void)
{
	if (sHasTlbia) {
		// the CPU supports invalidating the whole TLB in one go
		asm volatile("sync");
		asm volatile("tlbia");
		asm volatile("sync");
	} else {
		// otherwise invalidate every possible page index one by one
		addr_t address = 0;
		unsigned long i;

		asm volatile("sync");
		for (i = 0; i < 0x100000; i++) {
			asm volatile("tlbie %0" :: "r" (address));
			asm volatile("eieio");
			asm volatile("sync");

			address += B_PAGE_SIZE;
		}
		asm volatile("tlbsync");
		asm volatile("sync");
	}
}

void
arch_cpu_user_TLB_invalidate(void)
{
	arch_cpu_global_TLB_invalidate();
}

// TODO: all functions that use fault handlers need to be implemented
// in assembly due to problems passing in label addresses in gcc4.

status_t
arch_cpu_user_memcpy(void *to, const void *from, size_t size,
	addr_t *faultHandler)
{
	char *tmp = (char *)to;
	char *s = (char *)from;
	addr_t oldFaultHandler = *faultHandler;

	// TODO: This doesn't work correctly with gcc 4 anymore!
	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
		goto error;

	while (size--)
		*tmp++ = *s++;

	*faultHandler = oldFaultHandler;
	return 0;

error:
	*faultHandler = oldFaultHandler;
	return B_BAD_ADDRESS;
}

/**	\brief Copies at most (\a size - 1) characters from the string in \a from
 *	to the string in \a to, NUL-terminating the result.
 *
 *	\param to Pointer to the destination C-string.
 *	\param from Pointer to the source C-string.
 *	\param size Size in bytes of the string buffer pointed to by \a to.
 *
 *	\return strlen(\a from), or \c B_BAD_ADDRESS if an unhandled page fault
 *		occurs while accessing either string.
 */
ssize_t
arch_cpu_user_strlcpy(char *to, const char *from, size_t size,
	addr_t *faultHandler)
{
	int from_length = 0;
	addr_t oldFaultHandler = *faultHandler;

	// TODO: This doesn't work correctly with gcc 4 anymore!
	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
		goto error;

	if (size > 0) {
		to[--size] = '\0';
		// copy
		for ( ; size; size--, from_length++, to++, from++) {
			if ((*to = *from) == '\0')
				break;
		}
	}
	// count any leftover from characters
	while (*from++ != '\0')
		from_length++;

	*faultHandler = oldFaultHandler;
	return from_length;

error:
	*faultHandler = oldFaultHandler;
	return B_BAD_ADDRESS;
}
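
// Usage sketch (not part of this file): a caller copying a path string from
// userland would typically use the returned source length to detect
// truncation; "userPath" and "thread" are hypothetical names here:
//
//	char path[B_PATH_NAME_LENGTH];
//	ssize_t length = arch_cpu_user_strlcpy(path, userPath, sizeof(path),
//		&thread->fault_handler);
//	if (length < 0)
//		return B_BAD_ADDRESS;
//	if ((size_t)length >= sizeof(path))
//		return B_NAME_TOO_LONG;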

status_t
arch_cpu_user_memset(void *s, char c, size_t count, addr_t *faultHandler)
{
	char *xs = (char *)s;
	addr_t oldFaultHandler = *faultHandler;

	// TODO: This doesn't work correctly with gcc 4 anymore!
	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
		goto error;

	while (count--)
		*xs++ = c;

	*faultHandler = oldFaultHandler;
	return 0;

error:
	*faultHandler = oldFaultHandler;
	return B_BAD_ADDRESS;
}

status_t
arch_cpu_shutdown(bool reboot)
{
	PPCPlatform::Default()->ShutDown(reboot);
	return B_ERROR;
}

// The purpose of this function is to trick the compiler. When setting the
// page_handler to a label that is obviously (to the compiler) never used,
// it may reorganize the control flow, so that the labeled part is optimized
// away.
// By invoking the function like this
//
//	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
//		goto error;
//
// the compiler has to keep the labeled code, since it can't guess the return
// value of this (non-inlinable) function. At least in my tests it worked that
// way, and I hope it will continue to work like this in the future.
bool
ppc_set_fault_handler(addr_t *handlerLocation, addr_t handler)
{
	// TODO: This doesn't work correctly with gcc 4 anymore!
	*handlerLocation = handler;
	return false;
}
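
// Usage sketch (not part of this file): the arch_cpu_user_*() functions above
// all follow the pattern described in the comment; schematically:
//
//	addr_t oldFaultHandler = *faultHandler;
//	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
//		goto error;
//	// ... touch user memory; an unhandled fault resumes at "error" ...
//	*faultHandler = oldFaultHandler;
//	return B_OK;
//
//	error:
//	*faultHandler = oldFaultHandler;
//	return B_BAD_ADDRESS;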