/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>
#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
        .name  = "Crash kernel",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
int kexec_should_crash(struct task_struct *p)
{
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
                return 1;
        return 0;
}
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
{
        size_t segment_bytes;
        struct kimage *image;
        unsigned long i;
        int result;
        /* Allocate a controlling structure */
        result = -ENOMEM;
        image = kzalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                goto out;

        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
        if (result) {
                result = -EFAULT;
                goto out;
        }
        /*
         * Verify we have good destination addresses. The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM. This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned. Too many
         * special cases crop up when we don't do this. The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        goto out;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        goto out;
        }
        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through very weird things can happen with no
         * easy explanation as one segment stops on another.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;

                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
                                goto out;
                }
        }
        /* Ensure our buffer sizes are strictly less than
         * our memory sizes. This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        goto out;
        }

        result = 0;
        *rimage = image;
        return 0;

out:
        kfree(image);
        return result;
}
static void kimage_free_page_list(struct list_head *list);
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                               unsigned long nr_segments,
                               struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                return result;

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                pr_err("Could not allocate control_code_buffer\n");
                goto out_free_pages;
        }

        image->swap_page = kimage_alloc_control_pages(image, 0);
        if (!image->swap_page) {
                pr_err("Could not allocate swap buffer\n");
                goto out_free_pages;
        }

        *rimage = image;
        return 0;

out_free_pages:
        kimage_free_page_list(&image->control_pages);
        kfree(image);
        return result;
}
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                              unsigned long nr_segments,
                              struct kexec_segment __user *segments)
{
        int result;
        unsigned long i;
        struct kimage *image;

        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;
                goto out;
        }

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;
        /* Enable the special crash kernel control page
         * allocation policy.
         */
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;
        /*
         * Verify we have good destination addresses. Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM. But crash kernels are preloaded into a
         * reserved area of RAM. We must ensure the addresses
         * are in the reserved area otherwise preloading the
         * kernel could corrupt things.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
                        goto out_free_image;
        }
        /*
         * Find a location for the control code buffer, and add
         * it to the vector of segments so that its pages will also
         * be counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                pr_err("Could not allocate control_code_buffer\n");
                goto out_free_image;
        }

        *rimage = image;
        return 0;

out_free_image:
        kimage_free(image);
out:
        return result;
}
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start,
                                       unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
        }

        return pages;
}
static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}
static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                struct page *page;

                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place. As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
                pfn   = page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                              kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address. Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */
        }
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place. As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel. All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end   = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                        break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end   = hole_start + size - 1;
                                break;
                        }
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
        if (pages)
                image->control_page = hole_end;
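
/*
 * Worked example for the hole-finding arithmetic above (illustrative,
 * not part of the original file, with made-up numbers): for order = 1
 * on a 4K-page system, size is 0x2000. Starting from
 * image->control_page = 0x2100, rounding up to the next size-aligned
 * boundary gives
 *
 *	hole_start = (0x2100 + 0x1FFF) & ~0x1FFF = 0x4000
 *	hole_end   = 0x4000 + 0x2000 - 1         = 0x5FFF
 *
 * and each overlapping segment pushes the candidate hole to the next
 * size-aligned boundary past its end in the same way.
 */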
struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}
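
/*
 * Layout sketch for the entry list built above (illustrative, not part
 * of the original file): entries accumulate in the current indirection
 * page until its last slot, which is then chained to a fresh page with
 * an IND_INDIRECTION entry, e.g.
 *
 *	head: [dest|IND_DESTINATION][src|IND_SOURCE]...[page|IND_INDIRECTION]
 *	                                                        |
 *	          +---------------------------------------------+
 *	          v
 *	      [src|IND_SOURCE]...[0|IND_DONE]
 *
 * where each entry is a page-aligned physical address with the IND_*
 * flag in its low bits.
 */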
static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        int result;

        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
        if (result == 0)
                image->destination = destination;

        return result;
}
static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);
        if (result == 0)
                image->destination += PAGE_SIZE;

        return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
}
static void kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;
}
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
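
/*
 * Usage sketch (illustrative, not part of the original file): counting
 * the source pages of an image with the iterator above. The helper
 * name is hypothetical.
 */
static inline unsigned long kimage_count_source_pages(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        unsigned long count = 0;

        /* Walks every entry, following indirection pages transparently. */
        for_each_kimage_entry(image, ptr, entry)
                if (entry & IND_SOURCE)
                        count++;
        return count;
}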
static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                } else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
        kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time. If the runtime is a problem the data structures can
         * be fixed up.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        break;
                /* If the page cannot be used file it away */
                if (page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,
                                                 addr + PAGE_SIZE))
                        break;

                /*
                 * I know that the page is someone else's destination page.
                 * See if there is already a source page for this
                 * destination page. And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it if its
                         * gfp_flags honor the ones passed in.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);
                                continue;
                        }
                        addr = old_addr;
                        page = old_page;
                        break;
                } else {
                        /* Place the page on the destination list; I
                         * will use it later.
                         */
                        list_add(&page->lru, &image->dest_pages);
                }
        }

        return page;
}
static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
{
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap(page);
                /* Start with a clear page */
                clear_page(ptr);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                                PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);

                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}
static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
{
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                                PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);
                if (mchunk > uchunk) {
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }
                result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}
static int kimage_load_segment(struct kimage *image,
                               struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down. Preventing on-going dmas, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination. And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                struct kexec_segment __user *, segments, unsigned long, flags)
{
        struct kimage **dest_image, *image;
        int result;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
                return -EPERM;

        /*
         * Verify we have a legal set of flags
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;
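
        /*
         * Worked example (illustrative, not from the original source): with
         * flags = KEXEC_ON_CRASH | KEXEC_ARCH_DEFAULT, masking out the
         * architecture field leaves only KEXEC_ON_CRASH, which is within
         * KEXEC_FLAGS, so both sides of the check above are equal and the
         * call proceeds. Any unknown bit outside KEXEC_ARCH_MASK survives
         * the right-hand mask but not the left, so the two sides differ
         * and the call is rejected.
         */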
        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
                ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        image = NULL;
        result = 0;

        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                unsigned long i;

                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                     nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                         */
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                    nr_segments, segments);
                        crash_map_reserved_pages();
                }
                if (result)
                        goto out;

                if (flags & KEXEC_PRESERVE_CONTEXT)
                        image->preserve_context = 1;
                result = machine_kexec_prepare(image);
                if (result)
                        goto out;

                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);
                        if (result)
                                goto out;
                }
                kimage_terminate(image);
                if (flags & KEXEC_ON_CRASH)
                        crash_unmap_reserved_pages();
        }
        /* Install the new kernel, and Uninstall the old */
        image = xchg(dest_image, image);

out:
        mutex_unlock(&kexec_mutex);
        kimage_free(image);

        return result;
}
/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
                       compat_ulong_t, nr_segments,
                       struct compat_kexec_segment __user *, segments,
                       compat_ulong_t, flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;

        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;

        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                if (result)
                        return -EFAULT;

                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.mem   = in.mem;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));
                if (result)
                        return -EFAULT;
        }

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
void crash_kexec(struct pt_regs *regs)
{
        /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient. But since I reuse the memory...
         */
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                mutex_unlock(&kexec_mutex);
        }
}
size_t crash_get_memory_size(void)
{
        size_t size = 0;

        mutex_lock(&kexec_mutex);
        if (crashk_res.end != crashk_res.start)
                size = resource_size(&crashk_res);
        mutex_unlock(&kexec_mutex);

        return size;
}
void __weak crash_free_reserved_phys_range(unsigned long begin,
                                           unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE)
                free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
int crash_shrink_memory(unsigned long new_size)
{
        int ret = 0;
        unsigned long start, end;
        unsigned long old_size;
        struct resource *ram_res;

        mutex_lock(&kexec_mutex);

        if (kexec_crash_image) {
                ret = -ENOENT;
                goto unlock;
        }
        start = crashk_res.start;
        end = crashk_res.end;
        old_size = (end == 0) ? 0 : end - start + 1;
        if (new_size >= old_size) {
                ret = (new_size == old_size) ? 0 : -EINVAL;
                goto unlock;
        }

        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
        if (!ram_res) {
                ret = -ENOMEM;
                goto unlock;
        }

        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
        end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
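
        /*
         * Worked example (illustrative, with made-up numbers): shrinking a
         * reservation that starts at 0x10000000 to new_size = 64M keeps
         * [0x10000000, 0x14000000) and hands everything from 'end' up to
         * the old crashk_res.end back to the system as "System RAM" below.
         */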
        crash_map_reserved_pages();
        crash_free_reserved_phys_range(end, crashk_res.end);

        if ((start == end) && (crashk_res.parent != NULL))
                release_resource(&crashk_res);

        ram_res->start = end;
        ram_res->end = crashk_res.end;
        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        ram_res->name = "System RAM";

        crashk_res.end = end - 1;

        insert_resource(&iomem_resource, ram_res);
        crash_unmap_reserved_pages();

unlock:
        mutex_unlock(&kexec_mutex);
        return ret;
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
{
        struct elf_note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = data_len;
        note.n_type   = type;
        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3)/4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3)/4;
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3)/4;

        return buf;
}
static void final_note(u32 *buf)
{
        struct elf_note note;

        note.n_namesz = 0;
        note.n_descsz = 0;
        note.n_type   = 0;
        memcpy(buf, &note, sizeof(note));
}
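
/*
 * Usage sketch (illustrative, not part of the original file): notes are
 * packed back to back, each field padded to a u32 boundary (hence the
 * (x + 3)/4 steps above), and the buffer is closed with the empty note
 * written by final_note(). The note name and payload are hypothetical.
 */
static void __maybe_unused example_fill_note_buffer(u32 *buf)
{
        unsigned long payload = 0;      /* hypothetical descriptor data */

        buf = append_elf_note(buf, "EXAMPLE", 0, &payload, sizeof(payload));
        final_note(buf);
}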
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;
        u32 *buf;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))
                return;

        /* Using ELF notes here is opportunistic.
         * I need a well defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away. ELF notes happen to provide
         * all of that, so there is no need to invent something new.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        if (!buf)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
}
static int __init crash_notes_memory_init(void)
{
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
                pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
                return -ENOMEM;
        }

        return 0;
}
subsys_initcall(crash_notes_memory_init);
/*
 * Parsing the "crashkernel" command line.
 *
 * This code is intended to be called from architecture specific code.
 */

/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
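/*
 * Worked example (illustrative): on a machine with 1G of RAM, the option
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * matches the 512M-2G range, so 64M are reserved, at offset 16M.
 */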
static int __init parse_crashkernel_mem(char *cmdline,
                                        unsigned long long system_ram,
                                        unsigned long long *crash_size,
                                        unsigned long long *crash_base)
{
        char *cur = cmdline, *tmp;

        /* for each entry of the comma-separated list */
        do {
                unsigned long long start, end = ULLONG_MAX, size;

                /* get the start of the range */
                start = memparse(cur, &tmp);
                if (cur == tmp) {
                        pr_warn("crashkernel: Memory value expected\n");
                        return -EINVAL;
                }
                cur = tmp;
                if (*cur != '-') {
                        pr_warn("crashkernel: '-' expected\n");
                        return -EINVAL;
                }
                cur++;

                /* if no ':' is here, then we read the end */
                if (*cur != ':') {
                        end = memparse(cur, &tmp);
                        if (cur == tmp) {
                                pr_warn("crashkernel: Memory value expected\n");
                                return -EINVAL;
                        }
                        cur = tmp;
                        if (end <= start) {
                                pr_warn("crashkernel: end <= start\n");
                                return -EINVAL;
                        }
                }

                if (*cur != ':') {
                        pr_warn("crashkernel: ':' expected\n");
                        return -EINVAL;
                }
                cur++;

                size = memparse(cur, &tmp);
                if (cur == tmp) {
                        pr_warn("Memory value expected\n");
                        return -EINVAL;
                }
                cur = tmp;
                if (size >= system_ram) {
                        pr_warn("crashkernel: invalid size\n");
                        return -EINVAL;
                }

                /* match ? */
                if (system_ram >= start && system_ram < end) {
                        *crash_size = size;
                        break;
                }
        } while (*cur++ == ',');

        if (*crash_size > 0) {
                while (*cur && *cur != ' ' && *cur != '@')
                        cur++;
                if (*cur == '@') {
                        cur++;
                        *crash_base = memparse(cur, &tmp);
                        if (cur == tmp) {
                                pr_warn("Memory value expected after '@'\n");
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
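/*
 * Worked example (illustrative): crashkernel=128M@16M reserves 128M for
 * the crash kernel, starting at physical offset 16M.
 */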
static int __init parse_crashkernel_simple(char *cmdline,
                                           unsigned long long *crash_size,
                                           unsigned long long *crash_base)
{
        char *cur = cmdline;

        *crash_size = memparse(cmdline, &cur);
        if (cmdline == cur) {
                pr_warn("crashkernel: memory value expected\n");
                return -EINVAL;
        }

        if (*cur == '@')
                *crash_base = memparse(cur+1, &cur);
        else if (*cur != ' ' && *cur != '\0') {
                pr_warn("crashkernel: unrecognized char\n");
                return -EINVAL;
        }

        return 0;
}
#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
        [SUFFIX_HIGH] = ",high",
        [SUFFIX_LOW]  = ",low",
        [SUFFIX_NULL] = NULL,
};
/*
 * This function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
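/*
 * Worked example (illustrative): crashkernel=256M,high requests a 256M
 * reservation via the ",high" variant (which, e.g. on x86, may be placed
 * above 4G); the suffix must be the last thing in the option.
 */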
static int __init parse_crashkernel_suffix(char *cmdline,
                                           unsigned long long *crash_size,
                                           unsigned long long *crash_base,
                                           const char *suffix)
{
        char *cur = cmdline;

        *crash_size = memparse(cmdline, &cur);
        if (cmdline == cur) {
                pr_warn("crashkernel: memory value expected\n");
                return -EINVAL;
        }

        /* check with suffix */
        if (strncmp(cur, suffix, strlen(suffix))) {
                pr_warn("crashkernel: unrecognized char\n");
                return -EINVAL;
        }
        cur += strlen(suffix);
        if (*cur != ' ' && *cur != '\0') {
                pr_warn("crashkernel: unrecognized char\n");
                return -EINVAL;
        }

        return 0;
}
static __init char *get_last_crashkernel(char *cmdline,
                                         const char *name,
                                         const char *suffix)
{
        char *p = cmdline, *ck_cmdline = NULL;

        /* find crashkernel and use the last one if there are more */
        p = strstr(p, name);
        while (p) {
                char *end_p = strchr(p, ' ');
                char *q;

                if (!end_p)
                        end_p = p + strlen(p);

                if (!suffix) {
                        int i;

                        /* skip the one with any known suffix */
                        for (i = 0; suffix_tbl[i]; i++) {
                                q = end_p - strlen(suffix_tbl[i]);
                                if (!strncmp(q, suffix_tbl[i],
                                             strlen(suffix_tbl[i])))
                                        goto next;
                        }
                        ck_cmdline = p;
                } else {
                        q = end_p - strlen(suffix);
                        if (!strncmp(q, suffix, strlen(suffix)))
                                ck_cmdline = p;
                }
next:
                p = strstr(p+1, name);
        }

        if (!ck_cmdline)
                return NULL;

        return ck_cmdline;
}
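
/*
 * Behavior sketch (illustrative, not part of the original file): given
 *
 *	crashkernel=64M crashkernel=128M,high crashkernel=256M
 *
 * a lookup with suffix == NULL skips the ",high" entry and returns the
 * last plain one ("crashkernel=256M"), while a lookup with suffix
 * ",high" returns the middle entry.
 */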
static int __init __parse_crashkernel(char *cmdline,
                                      unsigned long long system_ram,
                                      unsigned long long *crash_size,
                                      unsigned long long *crash_base,
                                      const char *name,
                                      const char *suffix)
{
        char *first_colon, *first_space;
        char *ck_cmdline;

        BUG_ON(!crash_size || !crash_base);
        *crash_size = 0;
        *crash_base = 0;

        ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
        if (!ck_cmdline)
                return -EINVAL;

        ck_cmdline += strlen(name);

        if (suffix)
                return parse_crashkernel_suffix(ck_cmdline, crash_size,
                                                crash_base, suffix);
        /*
         * if the commandline contains a ':', then that's the extended
         * syntax -- if not, it must be the classic syntax
         */
        first_colon = strchr(ck_cmdline, ':');
        first_space = strchr(ck_cmdline, ' ');
        if (first_colon && (!first_space || first_colon < first_space))
                return parse_crashkernel_mem(ck_cmdline, system_ram,
                                             crash_size, crash_base);

        return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
{
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
                                   "crashkernel=", NULL);
}
int __init parse_crashkernel_high(char *cmdline,
                                  unsigned long long system_ram,
                                  unsigned long long *crash_size,
                                  unsigned long long *crash_base)
{
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
                                   "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
                                 unsigned long long system_ram,
                                 unsigned long long *crash_size,
                                 unsigned long long *crash_base)
{
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
                                   "crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
static void update_vmcoreinfo_note(void)
{
        u32 *buf = vmcoreinfo_note;

        if (!vmcoreinfo_size)
                return;
        buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
                              vmcoreinfo_size);
        final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
        vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
        update_vmcoreinfo_note();
}
void vmcoreinfo_append_str(const char *fmt, ...)
{
        va_list args;
        char buf[0x50];
        size_t r;

        va_start(args, fmt);
        r = vscnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

        memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

        vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
        return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
static int __init crash_save_vmcoreinfo_init(void)
{
        VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
        VMCOREINFO_PAGESIZE(PAGE_SIZE);

        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
        VMCOREINFO_SYMBOL(swapper_pg_dir);
        VMCOREINFO_SYMBOL(_stext);
        VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
        VMCOREINFO_SYMBOL(mem_map);
        VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
        VMCOREINFO_SYMBOL(mem_section);
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
        VMCOREINFO_STRUCT_SIZE(page);
        VMCOREINFO_STRUCT_SIZE(pglist_data);
        VMCOREINFO_STRUCT_SIZE(zone);
        VMCOREINFO_STRUCT_SIZE(free_area);
        VMCOREINFO_STRUCT_SIZE(list_head);
        VMCOREINFO_SIZE(nodemask_t);
        VMCOREINFO_OFFSET(page, flags);
        VMCOREINFO_OFFSET(page, _count);
        VMCOREINFO_OFFSET(page, mapping);
        VMCOREINFO_OFFSET(page, lru);
        VMCOREINFO_OFFSET(page, _mapcount);
        VMCOREINFO_OFFSET(page, private);
        VMCOREINFO_OFFSET(pglist_data, node_zones);
        VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
        VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
        VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
        VMCOREINFO_OFFSET(pglist_data, node_id);
        VMCOREINFO_OFFSET(zone, free_area);
        VMCOREINFO_OFFSET(zone, vm_stat);
        VMCOREINFO_OFFSET(zone, spanned_pages);
        VMCOREINFO_OFFSET(free_area, free_list);
        VMCOREINFO_OFFSET(list_head, next);
        VMCOREINFO_OFFSET(list_head, prev);
        VMCOREINFO_OFFSET(vmap_area, va_start);
        VMCOREINFO_OFFSET(vmap_area, list);
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
        log_buf_kexec_setup();
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
        VMCOREINFO_NUMBER(PG_lru);
        VMCOREINFO_NUMBER(PG_private);
        VMCOREINFO_NUMBER(PG_swapcache);
        VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
        VMCOREINFO_NUMBER(PG_hwpoison);
#endif
        VMCOREINFO_NUMBER(PG_head_mask);
        VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);

        arch_crash_save_vmcoreinfo();
        update_vmcoreinfo_note();

        return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);
/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
        int error = 0;

        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
                goto Unlock;
        }

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                lock_system_sleep();
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
                        error = -EBUSY;
                        goto Restore_console;
                }
                suspend_console();
                error = dpm_suspend_start(PMSG_FREEZE);
                if (error)
                        goto Resume_console;
                /* At this point, dpm_suspend_start() has been called,
                 * but *not* dpm_suspend_end(). We *must* call
                 * dpm_suspend_end() now. Otherwise, drivers for
                 * some devices (e.g. interrupt controllers) become
                 * desynchronized with the actual state of the
                 * hardware at resume time, and evil weirdness ensues.
                 */
                error = dpm_suspend_end(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
                error = disable_nonboot_cpus();
                if (error)
                        goto Enable_cpus;
                local_irq_disable();
                error = syscore_suspend();
                if (error)
                        goto Enable_irqs;
        } else
#endif
        {
                kexec_in_progress = true;
                kernel_restart_prepare(NULL);
                migrate_to_reboot_cpu();

                /*
                 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
                 * no further code needs to use CPU hotplug (which is true in
                 * the reboot case). However, the kexec path depends on using
                 * CPU hotplug again; so re-enable it here.
                 */
                cpu_hotplug_enable();
                pr_emerg("Starting new kernel\n");
                machine_shutdown();
        }

        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                syscore_resume();
 Enable_irqs:
                local_irq_enable();
 Enable_cpus:
                enable_nonboot_cpus();
                dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
                dpm_resume_end(PMSG_RESTORE);
 Resume_console:
                resume_console();
                thaw_processes();
 Restore_console:
                pm_restore_console();
                unlock_system_sleep();
        }
#endif

 Unlock:
        mutex_unlock(&kexec_mutex);
        return error;
}