/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * based on machine_kexec.c from other architectures in linux-2.6.18
 */
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/highmem.h>
#include <linux/mmu_context.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
/*
 * This stuff is not in elf.h and is not in any other kernel include.
 * This stuff is needed below in the little boot notes parser to
 * extract the command line so we can pass it to the hypervisor.
 */
/*
 * Header of the "ELF boot notes" block placed on a source page by the
 * boot loader; parsed by kexec_bn2cl() below to recover the command line.
 */
struct Elf32_Bhdr {
	Elf32_Word b_signature;		/* must equal ELF_BOOT_MAGIC */
	Elf32_Word b_size;		/* total size of the notes block */
	Elf32_Half b_checksum;		/* IP-style checksum over the block */
	Elf32_Half b_records;		/* number of note records that follow */
};
#define ELF_BOOT_MAGIC		0x0E1FB007
#define EBN_COMMAND_LINE	0x00000004
/* Round a note descriptor size up to 4-byte alignment. */
#define roundupsz(X) (((X) + 3) & ~3)
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* Quiesce the machine before kexec; nothing to do on this port. */
void machine_shutdown(void)
{
	/*
	 * Normally we would stop all the other processors here, but
	 * the check in machine_kexec_prepare below ensures we'll only
	 * get this far if we've been booted with "nosmp" on the
	 * command line or without CONFIG_SMP so there's nothing to do
	 * here (for now).
	 */
}
/* Crash-kexec entry point; intentionally unreachable on this port. */
void machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * Cannot happen.  This type of kexec is disabled on this
	 * architecture (and enforced in machine_kexec_prepare below).
	 */
}
77 int machine_kexec_prepare(struct kimage
*image
)
79 if (num_online_cpus() > 1) {
80 pr_warning("%s: detected attempt to kexec "
81 "with num_online_cpus() > 1\n",
85 if (image
->type
!= KEXEC_TYPE_DEFAULT
) {
86 pr_warning("%s: detected attempt to kexec "
87 "with unsupported type: %d\n",
/* Undo machine_kexec_prepare(); it allocated nothing, so a no-op. */
void machine_kexec_cleanup(struct kimage *image)
{
	/*
	 * We did nothing in machine_kexec_prepare,
	 * so we have nothing to do here.
	 */
}
/*
 * If we can find elf boot notes on this page, return the command
 * line.  Otherwise, silently return null.  Somewhat kludgy, but no
 * good way to do this without significantly rearchitecting the
 * architecture-independent kexec code.
 */
110 static unsigned char *kexec_bn2cl(void *pg
)
112 struct Elf32_Bhdr
*bhdrp
;
115 unsigned char *command_line
;
118 bhdrp
= (struct Elf32_Bhdr
*) pg
;
121 * This routine is invoked for every source page, so make
122 * sure to quietly ignore every impossible page.
124 if (bhdrp
->b_signature
!= ELF_BOOT_MAGIC
||
125 bhdrp
->b_size
> PAGE_SIZE
)
129 * If we get a checksum mismatch, warn with the checksum
130 * so we can diagnose better.
132 csum
= ip_compute_csum(pg
, bhdrp
->b_size
);
134 pr_warning("%s: bad checksum %#x (size %d)\n",
135 __func__
, csum
, bhdrp
->b_size
);
139 nhdrp
= (Elf32_Nhdr
*) (bhdrp
+ 1);
141 while (nhdrp
->n_type
!= EBN_COMMAND_LINE
) {
143 desc
= (unsigned char *) (nhdrp
+ 1);
144 desc
+= roundupsz(nhdrp
->n_descsz
);
146 nhdrp
= (Elf32_Nhdr
*) desc
;
148 /* still in bounds? */
149 if ((unsigned char *) (nhdrp
+ 1) >
150 ((unsigned char *) pg
) + bhdrp
->b_size
) {
152 pr_info("%s: out of bounds\n", __func__
);
157 command_line
= (unsigned char *) (nhdrp
+ 1);
160 while (*desc
!= '\0') {
162 if (((unsigned long)desc
& PAGE_MASK
) != (unsigned long)pg
) {
163 pr_info("%s: ran off end of page\n",
172 static void kexec_find_and_set_command_line(struct kimage
*image
)
174 kimage_entry_t
*ptr
, entry
;
176 unsigned char *command_line
= 0;
180 for (ptr
= &image
->head
;
181 (entry
= *ptr
) && !(entry
& IND_DONE
);
182 ptr
= (entry
& IND_INDIRECTION
) ?
183 phys_to_virt((entry
& PAGE_MASK
)) : ptr
+ 1) {
185 if ((entry
& IND_SOURCE
)) {
187 kmap_atomic_pfn(entry
>> PAGE_SHIFT
);
197 if (command_line
!= 0) {
198 pr_info("setting new command line to \"%s\"\n",
201 hverr
= hv_set_command_line(
202 (HV_VirtAddr
) command_line
, strlen(command_line
));
203 kunmap_atomic(command_line
);
205 pr_info("%s: no command line found; making empty\n",
207 hverr
= hv_set_command_line((HV_VirtAddr
) command_line
, 0);
210 pr_warning("%s: hv_set_command_line returned error: %d\n",
/*
 * The kexec code range-checks all its PAs, so to avoid having it run
 * amok and allocate memory and then sequester it from every other
 * controller, we force it to come from controller zero.  We also
 * disable the oom-killer since if we do end up running out of memory,
 * that almost certainly won't help.
 */
221 struct page
*kimage_alloc_pages_arch(gfp_t gfp_mask
, unsigned int order
)
223 gfp_mask
|= __GFP_THISNODE
| __GFP_NORETRY
;
224 return alloc_pages_node(0, gfp_mask
, order
);
/*
 * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
 * For tilepro, PAGE_OFFSET is used since this is the largest possible value
 * for tilepro, while for tilegx, we limit it to entire middle level page
 * table which we assume has been allocated and is undoubtedly large enough.
 */
#ifndef __tilegx__
#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
#else
#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
#endif
239 static void setup_quasi_va_is_pa(void)
245 * Flush our TLB to prevent conflicts between the previous contents
246 * and the new stuff we're about to add.
248 local_flush_tlb_all();
251 * setup VA is PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
252 * Note here we assume that level-1 page table is defined by
255 pte
= hv_pte(_PAGE_KERNEL
| _PAGE_HUGE_PAGE
);
256 pte
= hv_pte_set_mode(pte
, HV_PTE_MODE_CACHE_NO_L3
);
257 for (i
= 0; i
< (QUASI_VA_IS_PA_ADDR_RANGE
>> HPAGE_SHIFT
); i
++) {
258 unsigned long vaddr
= i
<< HPAGE_SHIFT
;
259 pgd_t
*pgd
= pgd_offset(current
->mm
, vaddr
);
260 pud_t
*pud
= pud_offset(pgd
, vaddr
);
261 pte_t
*ptep
= (pte_t
*) pmd_offset(pud
, vaddr
);
262 unsigned long pfn
= i
<< (HPAGE_SHIFT
- PAGE_SHIFT
);
265 __set_pte(ptep
, pfn_pte(pfn
, pte
));
270 void machine_kexec(struct kimage
*image
)
272 void *reboot_code_buffer
;
274 void (*rnk
)(unsigned long, void *, unsigned long)
277 /* Mask all interrupts before starting to reboot. */
278 interrupt_mask_set_mask(~0ULL);
280 kexec_find_and_set_command_line(image
);
283 * Adjust the home caching of the control page to be cached on
284 * this cpu, and copy the assembly helper into the control
285 * code page, which we map in the vmalloc area.
287 homecache_change_page_home(image
->control_code_page
, 0,
289 reboot_code_buffer
= page_address(image
->control_code_page
);
290 BUG_ON(reboot_code_buffer
== NULL
);
291 ptep
= virt_to_pte(NULL
, (unsigned long)reboot_code_buffer
);
292 __set_pte(ptep
, pte_mkexec(*ptep
));
293 memcpy(reboot_code_buffer
, relocate_new_kernel
,
294 relocate_new_kernel_size
);
295 __flush_icache_range(
296 (unsigned long) reboot_code_buffer
,
297 (unsigned long) reboot_code_buffer
+ relocate_new_kernel_size
);
299 setup_quasi_va_is_pa();
302 rnk
= reboot_code_buffer
;
303 (*rnk
)(image
->head
, reboot_code_buffer
, image
->start
);