/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
int kexec_should_crash(struct task_struct *p)
{
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
                return 1;
        return 0;
}
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * available.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
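/*
 * Illustrative sketch (not part of the original file): assuming the
 * IND_* flags from <linux/kexec.h> (IND_DESTINATION 0x1,
 * IND_INDIRECTION 0x2, IND_DONE 0x4, IND_SOURCE 0x8), a loaded image
 * with one segment of three pages might produce an entry list like:
 *
 *      0x00200000 | IND_DESTINATION    copy the following sources here
 *      0x04a00000 | IND_SOURCE         first source page
 *      0x04b00000 | IND_SOURCE         second source page
 *      0x04c00000 | IND_SOURCE         third source page
 *      IND_DONE                        end of list
 *
 * Each entry is a physical page address with a flag in its low bits;
 * an IND_INDIRECTION entry chains to another page of entries when the
 * current page of entries fills up.
 */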
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
{
        size_t segment_bytes;
        struct kimage *image;
        unsigned long i;
        int result;

        /* Allocate a controlling structure */
        result = -ENOMEM;
        image = kzalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                goto out;

        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
        if (result) {
                result = -EFAULT;
                goto out;
        }

        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        goto out;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        goto out;
        }

        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through very weird things can happen with no
         * easy explanation as one segment stops on another.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;
                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
                                goto out;
                }
        }

        /* Ensure our buffer sizes are strictly less than
         * our memory sizes.  This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                               unsigned long nr_segments,
                               struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;

        /* Allocate and initialize a controlling structure */
        image = NULL;
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        *rimage = image;

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        image->swap_page = kimage_alloc_control_pages(image, 0);
        if (!image->swap_page) {
                printk(KERN_ERR "Could not allocate swap buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kimage_free(image);

        return result;
}
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                              unsigned long nr_segments,
                              struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;
        unsigned long i;

        image = NULL;
        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;
                goto out;
        }

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        /* Enable the special crash kernel control page
         * allocation policy.
         */
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;

        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of RAM.  We must ensure the addresses
         * are in the reserved area otherwise preloading the
         * kernel could corrupt things.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
                        goto out;
        }

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kimage_free(image);

        return result;
}
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start,
                                       unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;
                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
        }

        return pages;
}
static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}
static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                struct page *page;

                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these are for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
                pfn   = page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                              kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */
        }
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel.  All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these are for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end   = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                        break;
                if (hole_end > crashk_res.end)
                        break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end   = hole_start + size - 1;
                                break;
                        }
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
        if (pages)
                image->control_page = hole_end;

        return pages;
}
struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}
static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        int result;

        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
        if (result == 0)
                image->destination = destination;

        return result;
}
static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);
        if (result == 0)
                image->destination += PAGE_SIZE;

        return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
}
static void kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;
}
#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
                        phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
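/*
 * Illustrative sketch (not part of the original file): given the entry
 * list from the example above, for_each_kimage_entry() visits each
 * entry in order, transparently hopping to the next page of entries
 * whenever it reads an IND_INDIRECTION entry, and stopping at IND_DONE:
 *
 *      kimage_entry_t *ptr, entry;
 *      for_each_kimage_entry(image, ptr, entry) {
 *              if (entry & IND_SOURCE)
 *                      ;       // entry & PAGE_MASK is a source page
 *      }
 */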
static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                }
                else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
        kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.  If the runtime is a problem the data structures can
         * be fixed up.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
                if (page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,
                                                 addr + PAGE_SIZE))
                        break;

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page.  And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it if its
                         * gfp_flags honor the ones passed in.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);
                                continue;
                        }
                        addr = old_addr;
                        page = old_page;
                        break;
                }
                else {
                        /* Place the page on the destination list; I
                         * will use it later.
                         */
                        list_add(&page->lru, &image->dest_pages);
                }
        }

        return page;
}
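/*
 * Illustrative walk-through (not part of the original file): suppose
 * kimage_alloc_page() above allocates page P at address A, but A is the
 * destination of some other segment.  If that destination already has a
 * source page Q, the code copies Q's contents into P, repoints the
 * existing IND_SOURCE entry at A, and returns Q instead, which by
 * construction no longer conflicts.  This is how the invariant described
 * at the top of the function is maintained.
 */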
static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
{
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap(page);
                /* Start with a clear page */
                clear_page(ptr);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes)
                        uchunk = ubytes;

                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}
static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
{
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes) {
                        uchunk = ubytes;
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }
                result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}
static int kimage_load_segment(struct kimage *image,
                               struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going dmas, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);
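/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical userspace invocation of this system call.  The entry
 * address, buffer, and segment layout below are placeholders; real
 * loaders such as kexec-tools compute them from the kernel image, and
 * page_round_up() stands in for whatever alignment helper the caller
 * uses.
 *
 *      struct kexec_segment seg = {
 *              .buf   = kernel_buf,            // image bytes in user memory
 *              .bufsz = kernel_len,
 *              .mem   = (void *)0x100000,      // page-aligned destination
 *              .memsz = page_round_up(kernel_len),
 *      };
 *      if (syscall(__NR_kexec_load, entry, 1UL, &seg,
 *                  KEXEC_ARCH_DEFAULT) != 0)
 *              perror("kexec_load");
 */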
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                struct kexec_segment __user *, segments, unsigned long, flags)
{
        struct kimage **dest_image, *image;
        int result;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /*
         * Verify we have a legal set of flags
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
                ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        image = NULL;
        result = 0;

        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                unsigned long i;

                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                     nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                         */
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                    nr_segments, segments);
                        crash_map_reserved_pages();
                }
                if (result)
                        goto out;

                if (flags & KEXEC_PRESERVE_CONTEXT)
                        image->preserve_context = 1;
                result = machine_kexec_prepare(image);
                if (result)
                        goto out;

                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);
                        if (result)
                                goto out;
                }
                kimage_terminate(image);
                if (flags & KEXEC_ON_CRASH)
                        crash_unmap_reserved_pages();
        }
        /* Install the new kernel, and uninstall the old */
        image = xchg(dest_image, image);

out:
        mutex_unlock(&kexec_mutex);
        kimage_free(image);

        return result;
}
/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                unsigned long nr_segments,
                                struct compat_kexec_segment __user *segments,
                                unsigned long flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;

        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;

        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                if (result)
                        return -EFAULT;

                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.mem   = in.mem;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));
                if (result)
                        return -EFAULT;
        }

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
void crash_kexec(struct pt_regs *regs)
{
        /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                mutex_unlock(&kexec_mutex);
        }
}
size_t crash_get_memory_size(void)
{
        size_t size = 0;

        mutex_lock(&kexec_mutex);
        if (crashk_res.end != crashk_res.start)
                size = resource_size(&crashk_res);
        mutex_unlock(&kexec_mutex);
        return size;
}
void __weak crash_free_reserved_phys_range(unsigned long begin,
                                           unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
                init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
                free_page((unsigned long)__va(addr));
        }
}
int crash_shrink_memory(unsigned long new_size)
{
        int ret = 0;
        unsigned long start, end;
        unsigned long old_size;
        struct resource *ram_res;

        mutex_lock(&kexec_mutex);

        if (kexec_crash_image) {
                ret = -ENOENT;
                goto unlock;
        }
        start = crashk_res.start;
        end = crashk_res.end;
        old_size = (end == 0) ? 0 : end - start + 1;
        if (new_size >= old_size) {
                ret = (new_size == old_size) ? 0 : -EINVAL;
                goto unlock;
        }

        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
        if (!ram_res) {
                ret = -ENOMEM;
                goto unlock;
        }

        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
        end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

        crash_map_reserved_pages();
        crash_free_reserved_phys_range(end, crashk_res.end);

        if ((start == end) && (crashk_res.parent != NULL))
                release_resource(&crashk_res);

        ram_res->start = end;
        ram_res->end   = crashk_res.end;
        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        ram_res->name  = "System RAM";

        crashk_res.end = end - 1;

        insert_resource(&iomem_resource, ram_res);
        crash_unmap_reserved_pages();

unlock:
        mutex_unlock(&kexec_mutex);
        return ret;
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
{
        struct elf_note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = data_len;
        note.n_type   = type;
        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3)/4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3)/4;
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3)/4;

        return buf;
}
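/*
 * Illustrative sketch (not part of the original file): for
 * append_elf_note(buf, "CORE", NT_PRSTATUS, data, len) the words
 * written follow the standard ELF note layout:
 *
 *      n_namesz = 5                    (strlen("CORE") + 1)
 *      n_descsz = len
 *      n_type   = NT_PRSTATUS
 *      "CORE\0", padded to a 4-byte boundary
 *      data, len bytes, padded to a 4-byte boundary
 */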
static void final_note(u32 *buf)
{
        struct elf_note note;

        note.n_namesz = 0;
        note.n_descsz = 0;
        note.n_type   = 0;
        memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;
        u32 *buf;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))
                return;

        /* Using ELF notes here is opportunistic.
         * I need a well defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away.  ELF notes happen to provide
         * all of that, so there is no need to invent something new.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        if (!buf)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
}
static int __init crash_notes_memory_init(void)
{
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
                printk("Kexec: Memory allocation for saving cpu register"
                " states failed\n");
                return -ENOMEM;
        }
        return 0;
}
module_init(crash_notes_memory_init)
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
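/*
 * Illustrative example (not part of the original file): a command line
 * in the extended syntax such as
 *
 *	crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M when total RAM is between 512M and 2G, and 128M when it
 * is 2G or more; an optional trailing @offset fixes the base address.
 */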
static int __init parse_crashkernel_mem(char *cmdline,
                                        unsigned long long system_ram,
                                        unsigned long long *crash_size,
                                        unsigned long long *crash_base)
{
        char *cur = cmdline, *tmp;

        /* for each entry of the comma-separated list */
        do {
                unsigned long long start, end = ULLONG_MAX, size;

                /* get the start of the range */
                start = memparse(cur, &tmp);
                if (cur == tmp) {
                        pr_warning("crashkernel: Memory value expected\n");
                        return -EINVAL;
                }
                cur = tmp;
                if (*cur != '-') {
                        pr_warning("crashkernel: '-' expected\n");
                        return -EINVAL;
                }
                cur++;

                /* if no ':' is here, then we read the end */
                if (*cur != ':') {
                        end = memparse(cur, &tmp);
                        if (cur == tmp) {
                                pr_warning("crashkernel: Memory "
                                                "value expected\n");
                                return -EINVAL;
                        }
                        cur = tmp;
                        if (end <= start) {
                                pr_warning("crashkernel: end <= start\n");
                                return -EINVAL;
                        }
                }

                if (*cur != ':') {
                        pr_warning("crashkernel: ':' expected\n");
                        return -EINVAL;
                }
                cur++;

                size = memparse(cur, &tmp);
                if (cur == tmp) {
                        pr_warning("Memory value expected\n");
                        return -EINVAL;
                }
                cur = tmp;
                if (size >= system_ram) {
                        pr_warning("crashkernel: invalid size\n");
                        return -EINVAL;
                }

                /* match ? */
                if (system_ram >= start && system_ram < end) {
                        *crash_size = size;
                        break;
                }
        } while (*cur++ == ',');

        if (*crash_size > 0) {
                while (*cur && *cur != ' ' && *cur != '@')
                        cur++;

                if (*cur == '@') {
                        cur++;
                        *crash_base = memparse(cur, &tmp);
                        if (cur == tmp) {
                                pr_warning("Memory value expected "
                                                "after '@'\n");
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
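/*
 * Illustrative example (not part of the original file): with the
 * simple syntax,
 *
 *	crashkernel=128M@16M
 *
 * reserves 128M of RAM for the crash kernel starting at physical
 * address 16M, regardless of the total amount of system RAM.
 */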
static int __init parse_crashkernel_simple(char *cmdline,
                                           unsigned long long *crash_size,
                                           unsigned long long *crash_base)
{
        char *cur = cmdline;

        *crash_size = memparse(cmdline, &cur);
        if (cmdline == cur) {
                pr_warning("crashkernel: memory value expected\n");
                return -EINVAL;
        }

        if (*cur == '@')
                *crash_base = memparse(cur+1, &cur);
        else if (*cur != ' ' && *cur != '\0') {
                pr_warning("crashkernel: unrecognized char\n");
                return -EINVAL;
        }

        return 0;
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
{
        char    *p = cmdline, *ck_cmdline = NULL;
        char    *first_colon, *first_space;

        BUG_ON(!crash_size || !crash_base);
        *crash_size = 0;
        *crash_base = 0;

        /* find crashkernel and use the last one if there are more */
        p = strstr(p, "crashkernel=");
        while (p) {
                ck_cmdline = p;
                p = strstr(p+1, "crashkernel=");
        }

        if (!ck_cmdline)
                return -EINVAL;

        ck_cmdline += 12; /* strlen("crashkernel=") */

        /*
         * if the commandline contains a ':', then that's the extended
         * syntax -- if not, it must be the classic syntax
         */
        first_colon = strchr(ck_cmdline, ':');
        first_space = strchr(ck_cmdline, ' ');
        if (first_colon && (!first_space || first_colon < first_space))
                return parse_crashkernel_mem(ck_cmdline, system_ram,
                                crash_size, crash_base);
        else
                return parse_crashkernel_simple(ck_cmdline, crash_size,
                                crash_base);
}
static void update_vmcoreinfo_note(void)
{
        u32 *buf = vmcoreinfo_note;

        if (!vmcoreinfo_size)
                return;
        buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
                              vmcoreinfo_size);
        final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
        vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
        update_vmcoreinfo_note();
}
void vmcoreinfo_append_str(const char *fmt, ...)
{
        va_list args;
        char buf[0x50];
        int r;

        va_start(args, fmt);
        r = vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        if (r + vmcoreinfo_size > vmcoreinfo_max_size)
                r = vmcoreinfo_max_size - vmcoreinfo_size;

        memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

        vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
        return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
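/*
 * Illustrative sketch (not part of the original file): the VMCOREINFO_*
 * macros used below are thin wrappers around vmcoreinfo_append_str().
 * For example, VMCOREINFO_SYMBOL(mem_map) appends a line of the form
 *
 *	SYMBOL(mem_map)=<address>
 *
 * to vmcoreinfo_data, which dump tools such as makedumpfile read from
 * the crash dump to locate kernel structures without debug info.
 */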
static int __init crash_save_vmcoreinfo_init(void)
{
        VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
        VMCOREINFO_PAGESIZE(PAGE_SIZE);

        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
        VMCOREINFO_SYMBOL(swapper_pg_dir);
        VMCOREINFO_SYMBOL(_stext);
        VMCOREINFO_SYMBOL(vmlist);

#ifndef CONFIG_NEED_MULTIPLE_NODES
        VMCOREINFO_SYMBOL(mem_map);
        VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
        VMCOREINFO_SYMBOL(mem_section);
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
        VMCOREINFO_STRUCT_SIZE(page);
        VMCOREINFO_STRUCT_SIZE(pglist_data);
        VMCOREINFO_STRUCT_SIZE(zone);
        VMCOREINFO_STRUCT_SIZE(free_area);
        VMCOREINFO_STRUCT_SIZE(list_head);
        VMCOREINFO_SIZE(nodemask_t);
        VMCOREINFO_OFFSET(page, flags);
        VMCOREINFO_OFFSET(page, _count);
        VMCOREINFO_OFFSET(page, mapping);
        VMCOREINFO_OFFSET(page, lru);
        VMCOREINFO_OFFSET(pglist_data, node_zones);
        VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
        VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
        VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
        VMCOREINFO_OFFSET(pglist_data, node_id);
        VMCOREINFO_OFFSET(zone, free_area);
        VMCOREINFO_OFFSET(zone, vm_stat);
        VMCOREINFO_OFFSET(zone, spanned_pages);
        VMCOREINFO_OFFSET(free_area, free_list);
        VMCOREINFO_OFFSET(list_head, next);
        VMCOREINFO_OFFSET(list_head, prev);
        VMCOREINFO_OFFSET(vm_struct, addr);
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
        log_buf_kexec_setup();
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
        VMCOREINFO_NUMBER(PG_lru);
        VMCOREINFO_NUMBER(PG_private);
        VMCOREINFO_NUMBER(PG_swapcache);

        arch_crash_save_vmcoreinfo();
        update_vmcoreinfo_note();

        return 0;
}

module_init(crash_save_vmcoreinfo_init)
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
        int error = 0;

        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
                goto Unlock;
        }

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                lock_system_sleep();
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
                        error = -EBUSY;
                        goto Restore_console;
                }
                suspend_console();
                error = dpm_suspend_start(PMSG_FREEZE);
                if (error)
                        goto Resume_console;
                /* At this point, dpm_suspend_start() has been called,
                 * but *not* dpm_suspend_end().  We *must* call
                 * dpm_suspend_end() now.  Otherwise, drivers for
                 * some devices (e.g. interrupt controllers) become
                 * desynchronized with the actual state of the
                 * hardware at resume time, and evil weirdness ensues.
                 */
                error = dpm_suspend_end(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
                error = disable_nonboot_cpus();
                if (error)
                        goto Enable_cpus;
                local_irq_disable();
                error = syscore_suspend();
                if (error)
                        goto Enable_irqs;
        } else
#endif
        {
                kernel_restart_prepare(NULL);
                printk(KERN_EMERG "Starting new kernel\n");
                machine_shutdown();
        }

        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                syscore_resume();
 Enable_irqs:
                local_irq_enable();
 Enable_cpus:
                enable_nonboot_cpus();
                dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
                dpm_resume_end(PMSG_RESTORE);
 Resume_console:
                resume_console();
                thaw_processes();
 Restore_console:
                pm_restore_console();
                unlock_system_sleep();
        }
#endif

 Unlock:
        mutex_unlock(&kexec_mutex);
        return error;
}