/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#define pr_fmt(fmt) "kexec: " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;
/*
 * Declare these symbols weak so that if architecture provides a purgatory,
 * these will be overridden.
 */
char __weak kexec_purgatory[0];
size_t __weak kexec_purgatory_size = 0;

static int kexec_calculate_store_digests(struct kimage *image);
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
int kexec_should_crash(struct task_struct *p)
{
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
                return 1;
        return 0;
}
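
/*
 * Note: kexec_should_crash() is consulted from the oops/die paths to
 * decide whether to branch into a loaded crash kernel instead of
 * completing a normal oops.  This is an observation about the callers,
 * not a contract enforced here.
 */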
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);
static int copy_user_segment_list(struct kimage *image,
                                  unsigned long nr_segments,
                                  struct kexec_segment __user *segments)
{
        int ret;
        size_t segment_bytes;

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        ret = copy_from_user(image->segment, segments, segment_bytes);
        if (ret)
                ret = -EFAULT;

        return ret;
}
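
/*
 * Roadmap for the checks below, in order: destination addresses must be
 * page aligned and below KEXEC_DESTINATION_MEMORY_LIMIT, segments must
 * not overlap one another, bufsz must not exceed memsz, and for crash
 * kernels every segment must sit inside the crashk_res reservation.
 */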
static int sanity_check_segment_list(struct kimage *image)
{
        int result, i;
        unsigned long nr_segments = image->nr_segments;

        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        return result;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        return result;
        }

        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through very weird things can happen with no
         * easy explanation as one segment stops on another.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;

                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
                                return result;
                }
        }

        /* Ensure our buffer sizes are strictly less than
         * our memory sizes.  This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        return result;
        }

        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of ram.  We must ensure the addresses
         * are in the reserved area otherwise preloading the
         * kernel could corrupt things.
         */

        if (image->type == KEXEC_TYPE_CRASH) {
                result = -EADDRNOTAVAIL;
                for (i = 0; i < nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend = mstart + image->segment[i].memsz - 1;
                        /* Ensure we are within the crash kernel limits */
                        if ((mstart < crashk_res.start) ||
                            (mend > crashk_res.end))
                                return result;
                }
        }

        return 0;
}
static struct kimage *do_kimage_alloc_init(void)
{
        struct kimage *image;

        /* Allocate a controlling structure */
        image = kzalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                return NULL;

        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unusable_pages);

        return image;
}
static void kimage_free_page_list(struct list_head *list);
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
                             unsigned long nr_segments,
                             struct kexec_segment __user *segments,
                             unsigned long flags)
{
        int ret;
        struct kimage *image;
        bool kexec_on_panic = flags & KEXEC_ON_CRASH;

        if (kexec_on_panic) {
                /* Verify we have a valid entry point */
                if ((entry < crashk_res.start) || (entry > crashk_res.end))
                        return -EADDRNOTAVAIL;
        }

        /* Allocate and initialize a controlling structure */
        image = do_kimage_alloc_init();
        if (!image)
                return -ENOMEM;

        image->start = entry;

        ret = copy_user_segment_list(image, nr_segments, segments);
        if (ret)
                goto out_free_image;

        ret = sanity_check_segment_list(image);
        if (ret)
                goto out_free_image;

        /* Enable the special crash kernel control page allocation policy. */
        if (kexec_on_panic) {
                image->control_page = crashk_res.start;
                image->type = KEXEC_TYPE_CRASH;
        }

        /*
         * Find a location for the control code buffer, and add it
         * to the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        ret = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                pr_err("Could not allocate control_code_buffer\n");
                goto out_free_image;
        }

        if (!kexec_on_panic) {
                image->swap_page = kimage_alloc_control_pages(image, 0);
                if (!image->swap_page) {
                        pr_err("Could not allocate swap buffer\n");
                        goto out_free_control_pages;
                }
        }

        *rimage = image;
        return 0;
out_free_control_pages:
        kimage_free_page_list(&image->control_pages);
out_free_image:
        kfree(image);
        return ret;
}
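
/*
 * Everything from here through kimage_file_alloc_init() implements the
 * kexec_file_load() path: the kernel itself reads the new kernel and
 * initrd from file descriptors and builds the segment list, instead of
 * trusting a segment list supplied by user space.
 */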
static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
{
        struct fd f = fdget(fd);
        int ret;
        struct kstat stat;
        loff_t pos;
        ssize_t bytes = 0;

        if (!f.file)
                return -EBADF;

        ret = vfs_getattr(&f.file->f_path, &stat);
        if (ret)
                goto out;

        if (stat.size > INT_MAX) {
                ret = -EFBIG;
                goto out;
        }

        /* Don't hand 0 to vmalloc, it whines. */
        if (stat.size == 0) {
                ret = -EINVAL;
                goto out;
        }

        *buf = vmalloc(stat.size);
        if (!*buf) {
                ret = -ENOMEM;
                goto out;
        }

        pos = 0;
        while (pos < stat.size) {
                bytes = kernel_read(f.file, pos, (char *)(*buf) + pos,
                                    stat.size - pos);
                if (bytes < 0) {
                        vfree(*buf);
                        ret = bytes;
                        goto out;
                }

                if (bytes == 0)
                        break;
                pos += bytes;
        }

        if (pos != stat.size) {
                ret = -EBADF;
                vfree(*buf);
                goto out;
        }

        *buf_len = pos;
out:
        fdput(f);
        return ret;
}
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
                                         unsigned long buf_len)
{
        return -ENOEXEC;
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
        return ERR_PTR(-ENOEXEC);
}

void __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
                                        unsigned long buf_len)
{
        return -EKEYREJECTED;
}
/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                                 unsigned int relsec)
{
        pr_err("RELA relocation unsupported.\n");
        return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                             unsigned int relsec)
{
        pr_err("REL relocation unsupported.\n");
        return -ENOEXEC;
}
/*
 * Free up memory used by kernel, initrd, and command line. This is temporary
 * memory allocation which is not needed any more after these buffers have
 * been loaded into separate segments and have been copied elsewhere.
 */
static void kimage_file_post_load_cleanup(struct kimage *image)
{
        struct purgatory_info *pi = &image->purgatory_info;

        vfree(image->kernel_buf);
        image->kernel_buf = NULL;

        vfree(image->initrd_buf);
        image->initrd_buf = NULL;

        kfree(image->cmdline_buf);
        image->cmdline_buf = NULL;

        vfree(pi->purgatory_buf);
        pi->purgatory_buf = NULL;

        vfree(pi->sechdrs);
        pi->sechdrs = NULL;

        /* See if architecture has anything to cleanup post load */
        arch_kimage_file_post_load_cleanup(image);

        /*
         * Above call should have called into bootloader to free up
         * any data stored in kimage->image_loader_data. It should
         * be ok now to free it up.
         */
        kfree(image->image_loader_data);
        image->image_loader_data = NULL;
}
/*
 * In file mode list of segments is prepared by kernel. Copy relevant
 * data from user space, do error checking, prepare segment list
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
                             const char __user *cmdline_ptr,
                             unsigned long cmdline_len, unsigned flags)
{
        int ret = 0;
        void *ldata;

        ret = copy_file_from_fd(kernel_fd, &image->kernel_buf,
                                &image->kernel_buf_len);
        if (ret)
                return ret;

        /* Call arch image probe handlers */
        ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
                                            image->kernel_buf_len);
        if (ret)
                goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
        ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
                                           image->kernel_buf_len);
        if (ret) {
                pr_debug("kernel signature verification failed.\n");
                goto out;
        }
        pr_debug("kernel signature verification successful.\n");
#endif
        /* It is possible that no initramfs is being loaded */
        if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
                ret = copy_file_from_fd(initrd_fd, &image->initrd_buf,
                                        &image->initrd_buf_len);
                if (ret)
                        goto out;
        }

        if (cmdline_len) {
                image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL);
                if (!image->cmdline_buf) {
                        ret = -ENOMEM;
                        goto out;
                }

                ret = copy_from_user(image->cmdline_buf, cmdline_ptr,
                                     cmdline_len);
                if (ret) {
                        ret = -EFAULT;
                        goto out;
                }

                image->cmdline_buf_len = cmdline_len;

                /* command line should be a string with last byte null */
                if (image->cmdline_buf[cmdline_len - 1] != '\0') {
                        ret = -EINVAL;
                        goto out;
                }
        }

        /* Call arch image load handlers */
        ldata = arch_kexec_kernel_image_load(image);

        if (IS_ERR(ldata)) {
                ret = PTR_ERR(ldata);
                goto out;
        }

        image->image_loader_data = ldata;
out:
        /* In case of error, free up all allocated memory in this function */
        if (ret)
                kimage_file_post_load_cleanup(image);
        return ret;
}
static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
                       int initrd_fd, const char __user *cmdline_ptr,
                       unsigned long cmdline_len, unsigned long flags)
{
        int ret;
        struct kimage *image;
        bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

        image = do_kimage_alloc_init();
        if (!image)
                return -ENOMEM;

        image->file_mode = 1;

        if (kexec_on_panic) {
                /* Enable special crash kernel control page alloc policy. */
                image->control_page = crashk_res.start;
                image->type = KEXEC_TYPE_CRASH;
        }

        ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
                                           cmdline_ptr, cmdline_len, flags);
        if (ret)
                goto out_free_image;

        ret = sanity_check_segment_list(image);
        if (ret)
                goto out_free_post_load_bufs;

        ret = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                pr_err("Could not allocate control_code_buffer\n");
                goto out_free_post_load_bufs;
        }

        if (!kexec_on_panic) {
                image->swap_page = kimage_alloc_control_pages(image, 0);
                if (!image->swap_page) {
                        pr_err("Could not allocate swap buffer\n");
                        goto out_free_control_pages;
                }
        }

        *rimage = image;
        return 0;
out_free_control_pages:
        kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
        kimage_file_post_load_cleanup(image);
out_free_image:
        kfree(image);
        return ret;
}
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start,
                                       unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
        }

        return pages;
}
static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}
static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                struct page *page;

                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these are for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
                pfn   = page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                              kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */
        }
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel.  All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these are for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end   = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                        break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end   = hole_start + size - 1;
                                break;
                        }
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
        if (pages)
                image->control_page = hole_end;

        return pages;
}
struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}
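
/*
 * The entry list built below is an array of physical addresses with
 * type flags in the low bits: IND_DESTINATION marks where the following
 * source pages should land, IND_SOURCE names a page of the new kernel,
 * IND_INDIRECTION chains to the next array of entries, and IND_DONE
 * terminates the list.  This is the structure handed to the assembly
 * stub described at the top of this file.
 */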
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}
static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        int result;

        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
        if (result == 0)
                image->destination = destination;

        return result;
}
static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);
        if (result == 0)
                image->destination += PAGE_SIZE;

        return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unusable_pages);
}
static void kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;
}
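
/*
 * Walk every entry in the image list, transparently following
 * IND_INDIRECTION links to the next array of entries and stopping at
 * the IND_DONE terminator.
 */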
#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
                        phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                } else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);

        /*
         * Free up any temporary buffers allocated. This might hit if
         * error occurred much later after buffer allocation.
         */
        if (image->file_mode)
                kimage_file_post_load_cleanup(image);

        kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
                                        gfp_t gfp_mask,
                                        unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simply to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.   If the runtime is a problem the data structures can
         * be fixed up.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
                if (page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unusable_pages);
                        continue;
                }
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,
                                                  addr + PAGE_SIZE))
                        break;

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page.  And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it if it's
                         * gfp_flags honor the ones passed in.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);
                                continue;
                        }
                        addr = old_addr;
                        page = old_page;
                        break;
                } else {
                        /* Place the page on the destination list; I
                         * will use it later.
                         */
                        list_add(&page->lru, &image->dest_pages);
                }
        }

        return page;
}
static int kimage_load_normal_segment(struct kimage *image,
                                         struct kexec_segment *segment)
{
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        result = 0;
        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result  = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap(page);
                /* Start with a clear page */
                clear_page(ptr);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                                PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);

                /* For file based kexec, source pages are in kernel memory */
                if (image->file_mode)
                        memcpy(ptr, kbuf, uchunk);
                else
                        result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                if (image->file_mode)
                        kbuf += mchunk;
                else
                        buf += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}
static int kimage_load_crash_segment(struct kimage *image,
                                        struct kexec_segment *segment)
{
        /* For crash dumps kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        result = 0;
        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result  = -ENOMEM;
                        goto out;
                }
                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                                PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);
                if (mchunk > uchunk) {
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }

                /* For file based kexec, source pages are in kernel memory */
                if (image->file_mode)
                        memcpy(ptr, kbuf, uchunk);
                else
                        result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                if (image->file_mode)
                        kbuf += mchunk;
                else
                        buf += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}
static int kimage_load_segment(struct kimage *image,
                                struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                struct kexec_segment __user *, segments, unsigned long, flags)
{
        struct kimage **dest_image, *image;
        int result;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
                return -EPERM;

        /*
         * Verify we have a legal set of flags
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
                ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        image = NULL;
        result = 0;

        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                unsigned long i;

                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_alloc_init(&image, entry, nr_segments,
                                                   segments, flags);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                         */
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_alloc_init(&image, entry, nr_segments,
                                                   segments, flags);
                        crash_map_reserved_pages();
                }
                if (result)
                        goto out;

                if (flags & KEXEC_PRESERVE_CONTEXT)
                        image->preserve_context = 1;
                result = machine_kexec_prepare(image);
                if (result)
                        goto out;

                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);
                        if (result)
                                goto out;
                }
                kimage_terminate(image);
                if (flags & KEXEC_ON_CRASH)
                        crash_unmap_reserved_pages();
        }
        /* Install the new kernel, and uninstall the old */
        image = xchg(dest_image, image);

out:
        mutex_unlock(&kexec_mutex);
        kimage_free(image);

        return result;
}
/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
                       compat_ulong_t, nr_segments,
                       struct compat_kexec_segment __user *, segments,
                       compat_ulong_t, flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;

        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;

        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                if (result)
                        return -EFAULT;

                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.mem   = in.mem;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));
                if (result)
                        return -EFAULT;
        }

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
                unsigned long, cmdline_len, const char __user *, cmdline_ptr,
                unsigned long, flags)
{
        int ret = 0, i;
        struct kimage **dest_image, *image;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
                return -EPERM;

        /* Make sure we have a legal set of flags */
        if (flags != (flags & KEXEC_FILE_FLAGS))
                return -EINVAL;

        image = NULL;

        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_FILE_ON_CRASH)
                dest_image = &kexec_crash_image;

        if (flags & KEXEC_FILE_UNLOAD)
                goto exchange;

        /*
         * In case of crash, new kernel gets loaded in reserved region. It is
         * same memory where old crash kernel might be loaded. Free any
         * current crash dump kernel before we corrupt it.
         */
        if (flags & KEXEC_FILE_ON_CRASH)
                kimage_free(xchg(&kexec_crash_image, NULL));

        ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
                                     cmdline_len, flags);
        if (ret)
                goto out;

        ret = machine_kexec_prepare(image);
        if (ret)
                goto out;

        ret = kexec_calculate_store_digests(image);
        if (ret)
                goto out;

        for (i = 0; i < image->nr_segments; i++) {
                struct kexec_segment *ksegment;

                ksegment = &image->segment[i];
                pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
                         i, ksegment->buf, ksegment->bufsz, ksegment->mem,
                         ksegment->memsz);

                ret = kimage_load_segment(image, &image->segment[i]);
                if (ret)
                        goto out;
        }

        kimage_terminate(image);

        /*
         * Free up any temporary buffers allocated which are not needed
         * after image has been loaded
         */
        kimage_file_post_load_cleanup(image);
exchange:
        image = xchg(dest_image, image);
out:
        mutex_unlock(&kexec_mutex);
        kimage_free(image);
        return ret;
}
void crash_kexec(struct pt_regs *regs)
{
        /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                mutex_unlock(&kexec_mutex);
        }
}
size_t crash_get_memory_size(void)
{
        size_t size = 0;

        mutex_lock(&kexec_mutex);
        if (crashk_res.end != crashk_res.start)
                size = resource_size(&crashk_res);
        mutex_unlock(&kexec_mutex);
        return size;
}
void __weak crash_free_reserved_phys_range(unsigned long begin,
                                           unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE)
                free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
int crash_shrink_memory(unsigned long new_size)
{
        int ret = 0;
        unsigned long start, end;
        unsigned long old_size;
        struct resource *ram_res;

        mutex_lock(&kexec_mutex);

        if (kexec_crash_image) {
                ret = -ENOENT;
                goto unlock;
        }
        start = crashk_res.start;
        end = crashk_res.end;
        old_size = (end == 0) ? 0 : end - start + 1;
        if (new_size >= old_size) {
                ret = (new_size == old_size) ? 0 : -EINVAL;
                goto unlock;
        }

        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
        if (!ram_res) {
                ret = -ENOMEM;
                goto unlock;
        }

        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
        end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

        crash_map_reserved_pages();
        crash_free_reserved_phys_range(end, crashk_res.end);

        if ((start == end) && (crashk_res.parent != NULL))
                release_resource(&crashk_res);

        ram_res->start = end;
        ram_res->end = crashk_res.end;
        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        ram_res->name = "System RAM";

        crashk_res.end = end - 1;

        insert_resource(&iomem_resource, ram_res);
        crash_unmap_reserved_pages();

unlock:
        mutex_unlock(&kexec_mutex);
        return ret;
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
{
        struct elf_note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = data_len;
        note.n_type   = type;
        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3)/4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3)/4;
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3)/4;

        return buf;
}
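
/*
 * Resulting note layout, per the ELF note format (each piece padded to
 * a 4-byte boundary, which is what the (x + 3)/4 arithmetic above does):
 *
 *   +-------------------+
 *   | Elf_Nhdr          |  n_namesz, n_descsz, n_type
 *   +-------------------+
 *   | name (n_namesz)   |  NUL-terminated, padded to 4 bytes
 *   +-------------------+
 *   | desc (n_descsz)   |  payload, padded to 4 bytes
 *   +-------------------+
 */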
static void final_note(u32 *buf)
{
        struct elf_note note;

        note.n_namesz = 0;
        note.n_descsz = 0;
        note.n_type   = 0;
        memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;
        u32 *buf;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))
                return;

        /* Using ELF notes here is opportunistic.
         * I need a well defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away.  ELF notes happen to provide
         * all of that, so there is no need to invent something new.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        if (!buf)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
}
static int __init crash_notes_memory_init(void)
{
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
                pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
                return -ENOMEM;
        }
        return 0;
}
subsys_initcall(crash_notes_memory_init);
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *   crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_mem(char *cmdline,
                                        unsigned long long system_ram,
                                        unsigned long long *crash_size,
                                        unsigned long long *crash_base)
{
        char *cur = cmdline, *tmp;

        /* for each entry of the comma-separated list */
        do {
                unsigned long long start, end = ULLONG_MAX, size;

                /* get the start of the range */
                start = memparse(cur, &tmp);
                if (cur == tmp) {
                        pr_warn("crashkernel: Memory value expected\n");
                        return -EINVAL;
                }
                cur = tmp;
                if (*cur != '-') {
                        pr_warn("crashkernel: '-' expected\n");
                        return -EINVAL;
                }
                cur++;

                /* if no ':' is here, then we read the end */
                if (*cur != ':') {
                        end = memparse(cur, &tmp);
                        if (cur == tmp) {
                                pr_warn("crashkernel: Memory value expected\n");
                                return -EINVAL;
                        }
                        cur = tmp;
                        if (end <= start) {
                                pr_warn("crashkernel: end <= start\n");
                                return -EINVAL;
                        }
                }

                if (*cur != ':') {
                        pr_warn("crashkernel: ':' expected\n");
                        return -EINVAL;
                }
                cur++;

                size = memparse(cur, &tmp);
                if (cur == tmp) {
                        pr_warn("Memory value expected\n");
                        return -EINVAL;
                }
                cur = tmp;
                if (size >= system_ram) {
                        pr_warn("crashkernel: invalid size\n");
                        return -EINVAL;
                }

                /* match ? */
                if (system_ram >= start && system_ram < end) {
                        *crash_size = size;
                        break;
                }
        } while (*cur++ == ',');

        if (*crash_size > 0) {
                while (*cur && *cur != ' ' && *cur != '@')
                        cur++;
                if (*cur == '@') {
                        cur++;
                        *crash_base = memparse(cur, &tmp);
                        if (cur == tmp) {
                                pr_warn("Memory value expected after '@'\n");
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
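
/*
 * Illustrative example of the extended syntax (the values here are
 * arbitrary, not a recommendation):
 *
 *   crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M when total RAM is between 512M and 2G, and 128M when
 * RAM is 2G or more; an optional trailing @offset pins the base address.
 */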
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *      crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
                                           unsigned long long *crash_size,
                                           unsigned long long *crash_base)
{
        char *cur = cmdline;

        *crash_size = memparse(cmdline, &cur);
        if (cmdline == cur) {
                pr_warn("crashkernel: memory value expected\n");
                return -EINVAL;
        }

        if (*cur == '@')
                *crash_base = memparse(cur+1, &cur);
        else if (*cur != ' ' && *cur != '\0') {
                pr_warn("crashkernel: unrecognized char\n");
                return -EINVAL;
        }

        return 0;
}
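
/* Illustrative example: crashkernel=128M@16M reserves 128M at offset 16M. */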
#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
        [SUFFIX_HIGH] = ",high",
        [SUFFIX_LOW]  = ",low",
        [SUFFIX_NULL] = NULL,
};
/*
 * This function parses "suffix" crashkernel command lines like
 *
 *      crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_suffix(char *cmdline,
                                           unsigned long long *crash_size,
                                           unsigned long long *crash_base,
                                           const char *suffix)
{
        char *cur = cmdline;

        *crash_size = memparse(cmdline, &cur);
        if (cmdline == cur) {
                pr_warn("crashkernel: memory value expected\n");
                return -EINVAL;
        }

        /* check with suffix */
        if (strncmp(cur, suffix, strlen(suffix))) {
                pr_warn("crashkernel: unrecognized char\n");
                return -EINVAL;
        }
        cur += strlen(suffix);
        if (*cur != ' ' && *cur != '\0') {
                pr_warn("crashkernel: unrecognized char\n");
                return -EINVAL;
        }

        return 0;
}
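
/* Illustrative example: crashkernel=256M,high (parsed with suffix ",high"). */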
static __init char *get_last_crashkernel(char *cmdline,
                             const char *name,
                             const char *suffix)
{
        char *p = cmdline, *ck_cmdline = NULL;

        /* find crashkernel and use the last one if there are more */
        p = strstr(p, name);
        while (p) {
                char *end_p = strchr(p, ' ');
                char *q;

                if (!end_p)
                        end_p = p + strlen(p);

                if (!suffix) {
                        int i;

                        /* skip the one with any known suffix */
                        for (i = 0; suffix_tbl[i]; i++) {
                                q = end_p - strlen(suffix_tbl[i]);
                                if (!strncmp(q, suffix_tbl[i],
                                             strlen(suffix_tbl[i])))
                                        goto next;
                        }
                        ck_cmdline = p;
                } else {
                        q = end_p - strlen(suffix);
                        if (!strncmp(q, suffix, strlen(suffix)))
                                ck_cmdline = p;
                }
next:
                p = strstr(p+1, name);
        }

        if (!ck_cmdline)
                return NULL;

        return ck_cmdline;
}
static int __init __parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base,
                             const char *name,
                             const char *suffix)
{
        char *first_colon, *first_space;
        char *ck_cmdline;

        BUG_ON(!crash_size || !crash_base);
        *crash_size = 0;
        *crash_base = 0;

        ck_cmdline = get_last_crashkernel(cmdline, name, suffix);

        if (!ck_cmdline)
                return -EINVAL;

        ck_cmdline += strlen(name);

        if (suffix)
                return parse_crashkernel_suffix(ck_cmdline, crash_size,
                                crash_base, suffix);
        /*
         * if the commandline contains a ':', then that's the extended
         * syntax -- if not, it must be the classic syntax
         */
        first_colon = strchr(ck_cmdline, ':');
        first_space = strchr(ck_cmdline, ' ');
        if (first_colon && (!first_space || first_colon < first_space))
                return parse_crashkernel_mem(ck_cmdline, system_ram,
                                crash_size, crash_base);

        return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
{
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
                                        "crashkernel=", NULL);
}

int __init parse_crashkernel_high(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
{
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
                                "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
{
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
                                "crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
static void update_vmcoreinfo_note(void)
{
        u32 *buf = vmcoreinfo_note;

        if (!vmcoreinfo_size)
                return;
        buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
                              vmcoreinfo_size);
        final_note(buf);
}
void crash_save_vmcoreinfo(void)
{
        vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
        update_vmcoreinfo_note();
}
void vmcoreinfo_append_str(const char *fmt, ...)
{
        va_list args;
        char buf[0x50];
        size_t r;

        va_start(args, fmt);
        r = vscnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

        memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

        vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
        return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
static int __init crash_save_vmcoreinfo_init(void)
{
        VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
        VMCOREINFO_PAGESIZE(PAGE_SIZE);

        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
        VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
        VMCOREINFO_SYMBOL(_stext);
        VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
        VMCOREINFO_SYMBOL(mem_map);
        VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
        VMCOREINFO_SYMBOL(mem_section);
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
        VMCOREINFO_STRUCT_SIZE(page);
        VMCOREINFO_STRUCT_SIZE(pglist_data);
        VMCOREINFO_STRUCT_SIZE(zone);
        VMCOREINFO_STRUCT_SIZE(free_area);
        VMCOREINFO_STRUCT_SIZE(list_head);
        VMCOREINFO_SIZE(nodemask_t);
        VMCOREINFO_OFFSET(page, flags);
        VMCOREINFO_OFFSET(page, _count);
        VMCOREINFO_OFFSET(page, mapping);
        VMCOREINFO_OFFSET(page, lru);
        VMCOREINFO_OFFSET(page, _mapcount);
        VMCOREINFO_OFFSET(page, private);
        VMCOREINFO_OFFSET(pglist_data, node_zones);
        VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
        VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
        VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
        VMCOREINFO_OFFSET(pglist_data, node_id);
        VMCOREINFO_OFFSET(zone, free_area);
        VMCOREINFO_OFFSET(zone, vm_stat);
        VMCOREINFO_OFFSET(zone, spanned_pages);
        VMCOREINFO_OFFSET(free_area, free_list);
        VMCOREINFO_OFFSET(list_head, next);
        VMCOREINFO_OFFSET(list_head, prev);
        VMCOREINFO_OFFSET(vmap_area, va_start);
        VMCOREINFO_OFFSET(vmap_area, list);
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
        log_buf_kexec_setup();
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
        VMCOREINFO_NUMBER(PG_lru);
        VMCOREINFO_NUMBER(PG_private);
        VMCOREINFO_NUMBER(PG_swapcache);
        VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
        VMCOREINFO_NUMBER(PG_hwpoison);
#endif
        VMCOREINFO_NUMBER(PG_head_mask);
        VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLBFS
        VMCOREINFO_SYMBOL(free_huge_page);
#endif

        arch_crash_save_vmcoreinfo();
        update_vmcoreinfo_note();

        return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);
static int __kexec_add_segment(struct kimage *image, char *buf,
                               unsigned long bufsz, unsigned long mem,
                               unsigned long memsz)
{
        struct kexec_segment *ksegment;

        ksegment = &image->segment[image->nr_segments];
        ksegment->kbuf = buf;
        ksegment->bufsz = bufsz;
        ksegment->mem = mem;
        ksegment->memsz = memsz;
        image->nr_segments++;

        return 0;
}
static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
                                    struct kexec_buf *kbuf)
{
        struct kimage *image = kbuf->image;
        unsigned long temp_start, temp_end;

        temp_end = min(end, kbuf->buf_max);
        temp_start = temp_end - kbuf->memsz;

        do {
                /* align down start */
                temp_start = temp_start & (~(kbuf->buf_align - 1));

                if (temp_start < start || temp_start < kbuf->buf_min)
                        return 0;

                temp_end = temp_start + kbuf->memsz - 1;

                /*
                 * Make sure this does not conflict with any of existing
                 * segments
                 */
                if (kimage_is_destination_range(image, temp_start, temp_end)) {
                        temp_start = temp_start - PAGE_SIZE;
                        continue;
                }

                /* We found a suitable memory range */
                break;
        } while (1);

        /* If we are here, we found a suitable memory range */
        __kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
                            kbuf->memsz);

        /* Success, stop navigating through remaining System RAM ranges */
        return 1;
}
static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
                                     struct kexec_buf *kbuf)
{
        struct kimage *image = kbuf->image;
        unsigned long temp_start, temp_end;

        temp_start = max(start, kbuf->buf_min);

        do {
                temp_start = ALIGN(temp_start, kbuf->buf_align);
                temp_end = temp_start + kbuf->memsz - 1;

                if (temp_end > end || temp_end > kbuf->buf_max)
                        return 0;
                /*
                 * Make sure this does not conflict with any of existing
                 * segments
                 */
                if (kimage_is_destination_range(image, temp_start, temp_end)) {
                        temp_start = temp_start + PAGE_SIZE;
                        continue;
                }

                /* We found a suitable memory range */
                break;
        } while (1);

        /* If we are here, we found a suitable memory range */
        __kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
                            kbuf->memsz);

        /* Success, stop navigating through remaining System RAM ranges */
        return 1;
}
static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
{
        struct kexec_buf *kbuf = (struct kexec_buf *)arg;
        unsigned long sz = end - start + 1;

        /* Returning 0 will move on to the next memory range */
        if (sz < kbuf->memsz)
                return 0;

        if (end < kbuf->buf_min || start > kbuf->buf_max)
                return 0;

        /*
         * Allocate memory top down within the RAM range. Otherwise
         * allocate bottom up.
         */
        if (kbuf->top_down)
                return locate_mem_hole_top_down(start, end, kbuf);
        return locate_mem_hole_bottom_up(start, end, kbuf);
}
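
/*
 * The callback above is driven by walk_system_ram_res() (or, for crash
 * kernels, by walk_iomem_res() over the "Crash kernel" reservation): it
 * is invoked once per resource range, returns 0 to keep walking, and
 * returns 1 once a hole has been claimed via __kexec_add_segment().
 */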
/*
 * Helper function for placing a buffer in a kexec segment. This assumes
 * that kexec_mutex is held.
 */
int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
                     unsigned long memsz, unsigned long buf_align,
                     unsigned long buf_min, unsigned long buf_max,
                     bool top_down, unsigned long *load_addr)
{

        struct kexec_segment *ksegment;
        struct kexec_buf buf, *kbuf;
        int ret;

        /* Currently adding segment this way is allowed only in file mode */
        if (!image->file_mode)
                return -EINVAL;

        if (image->nr_segments >= KEXEC_SEGMENT_MAX)
                return -EINVAL;

        /*
         * Make sure we are not trying to add buffer after allocating
         * control pages. All segments need to be placed first before
         * any control pages are allocated, as the control page allocation
         * logic goes through the list of segments to make sure there are
         * no destination overlaps.
         */
        if (!list_empty(&image->control_pages)) {
                WARN_ON(1);
                return -EINVAL;
        }

        memset(&buf, 0, sizeof(struct kexec_buf));
        kbuf = &buf;
        kbuf->image = image;
        kbuf->buffer = buffer;
        kbuf->bufsz = bufsz;

        kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
        kbuf->buf_align = max(buf_align, PAGE_SIZE);
        kbuf->buf_min = buf_min;
        kbuf->buf_max = buf_max;
        kbuf->top_down = top_down;

        /* Walk the RAM ranges and allocate a suitable range for the buffer */
        if (image->type == KEXEC_TYPE_CRASH)
                ret = walk_iomem_res("Crash kernel",
                                     IORESOURCE_MEM | IORESOURCE_BUSY,
                                     crashk_res.start, crashk_res.end, kbuf,
                                     locate_mem_hole_callback);
        else
                ret = walk_system_ram_res(0, -1, kbuf,
                                          locate_mem_hole_callback);
        if (ret != 1) {
                /* A suitable memory range could not be found for buffer */
                return -EADDRNOTAVAIL;
        }

        /* Found a suitable memory range */
        ksegment = &image->segment[image->nr_segments - 1];
        *load_addr = ksegment->mem;
        return 0;
}
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int ret = 0, i, j, zero_buf_sz, sha_region_sz;
        size_t desc_size, nullsz;
        char *digest;
        void *zero_buf;
        struct kexec_sha_region *sha_regions;
        struct purgatory_info *pi = &image->purgatory_info;

        zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
        zero_buf_sz = PAGE_SIZE;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm)) {
                ret = PTR_ERR(tfm);
                goto out;
        }

        desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
        desc = kzalloc(desc_size, GFP_KERNEL);
        if (!desc) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
        sha_regions = vzalloc(sha_region_sz);
        if (!sha_regions) {
                ret = -ENOMEM;
                goto out_free_desc;
        }

        desc->tfm   = tfm;
        desc->flags = 0;

        ret = crypto_shash_init(desc);
        if (ret < 0)
                goto out_free_sha_regions;

        digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
        if (!digest) {
                ret = -ENOMEM;
                goto out_free_sha_regions;
        }

        for (j = i = 0; i < image->nr_segments; i++) {
                struct kexec_segment *ksegment;

                ksegment = &image->segment[i];
                /*
                 * Skip purgatory as it will be modified once we put digest
                 * info in purgatory.
                 */
                if (ksegment->kbuf == pi->purgatory_buf)
                        continue;

                ret = crypto_shash_update(desc, ksegment->kbuf,
                                          ksegment->bufsz);
                if (ret)
                        break;

                /*
                 * Assume rest of the buffer is filled with zero and
                 * update digest accordingly.
                 */
                nullsz = ksegment->memsz - ksegment->bufsz;
                while (nullsz) {
                        unsigned long bytes = nullsz;

                        if (bytes > zero_buf_sz)
                                bytes = zero_buf_sz;
                        ret = crypto_shash_update(desc, zero_buf, bytes);
                        if (ret)
                                break;
                        nullsz -= bytes;
                }

                if (ret)
                        break;

                sha_regions[j].start = ksegment->mem;
                sha_regions[j].len = ksegment->memsz;
                j++;
        }

        if (!ret) {
                ret = crypto_shash_final(desc, digest);
                if (ret)
                        goto out_free_digest;
                ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
                                                sha_regions, sha_region_sz, 0);
                if (ret)
                        goto out_free_digest;

                ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
                                                digest, SHA256_DIGEST_SIZE, 0);
                if (ret)
                        goto out_free_digest;
        }

out_free_digest:
        kfree(digest);
out_free_sha_regions:
        vfree(sha_regions);
out_free_desc:
        kfree(desc);
out_free_tfm:
        crypto_free_shash(tfm);
out:
        return ret;
}
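
/*
 * The two symbols patched above ("sha_regions" and "sha256_digest") are
 * consumed by the purgatory code, which recomputes SHA-256 over those
 * regions at reboot time and refuses to jump into a corrupted image.
 */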
/* Actually load purgatory. Lot of code taken from kexec-tools */
static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
                                  unsigned long max, int top_down)
{
        struct purgatory_info *pi = &image->purgatory_info;
        unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
        unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
        unsigned char *buf_addr, *src;
        int i, ret = 0, entry_sidx = -1;
        const Elf_Shdr *sechdrs_c;
        Elf_Shdr *sechdrs = NULL;
        void *purgatory_buf = NULL;

        /*
         * sechdrs_c points to section headers in purgatory and are read
         * only. No modifications allowed.
         */
        sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;

        /*
         * We can not modify sechdrs_c[] and its fields. It is read only.
         * Copy it over to a local copy where one can store some temporary
         * data and free it at the end. We need to modify ->sh_addr and
         * ->sh_offset fields to keep track of permanent and temporary
         * locations of sections.
         */
        sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
        if (!sechdrs)
                return -ENOMEM;

        memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));

        /*
         * We seem to have multiple copies of sections. First copy is which
         * is embedded in kernel in read only section. Some of these sections
         * will be copied to a temporary buffer and relocated. And these
         * sections will finally be copied to their final destination at
         * segment load time.
         *
         * Use ->sh_offset to reflect section address in memory. It will
         * point to original read only copy if section is not allocatable.
         * Otherwise it will point to temporary copy which will be relocated.
         *
         * Use ->sh_addr to contain final address of the section where it
         * will go during execution time.
         */
        for (i = 0; i < pi->ehdr->e_shnum; i++) {
                if (sechdrs[i].sh_type == SHT_NOBITS)
                        continue;

                sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
                                                sechdrs[i].sh_offset;
        }

        /*
         * Identify entry point section and make entry relative to section
         * start.
         */
        entry = pi->ehdr->e_entry;
        for (i = 0; i < pi->ehdr->e_shnum; i++) {
                if (!(sechdrs[i].sh_flags & SHF_ALLOC))
                        continue;

                if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
                        continue;

                /* Make entry section relative */
                if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
                    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
                     pi->ehdr->e_entry)) {
                        entry_sidx = i;
                        entry -= sechdrs[i].sh_addr;
                        break;
                }
        }

        /* Determine how much memory is needed to load relocatable object. */
        buf_align = 1;
        bss_align = 1;
        buf_sz = 0;
        bss_sz = 0;

        for (i = 0; i < pi->ehdr->e_shnum; i++) {
                if (!(sechdrs[i].sh_flags & SHF_ALLOC))
                        continue;

                align = sechdrs[i].sh_addralign;
                if (sechdrs[i].sh_type != SHT_NOBITS) {
                        if (buf_align < align)
                                buf_align = align;
                        buf_sz = ALIGN(buf_sz, align);
                        buf_sz += sechdrs[i].sh_size;
                } else {
                        /* bss section */
                        if (bss_align < align)
                                bss_align = align;
                        bss_sz = ALIGN(bss_sz, align);
                        bss_sz += sechdrs[i].sh_size;
                }
        }

        /* Determine the bss padding required to align bss properly */
        bss_pad = 0;
        if (buf_sz & (bss_align - 1))
                bss_pad = bss_align - (buf_sz & (bss_align - 1));

        memsz = buf_sz + bss_pad + bss_sz;

        /* Allocate buffer for purgatory */
        purgatory_buf = vzalloc(buf_sz);
        if (!purgatory_buf) {
                ret = -ENOMEM;
                goto out;
        }

        if (buf_align < bss_align)
                buf_align = bss_align;

        /* Add buffer to segment list */
        ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
                                buf_align, min, max, top_down,
                                &pi->purgatory_load_addr);
        if (ret)
                goto out;

        /* Load SHF_ALLOC sections */
        buf_addr = purgatory_buf;
        load_addr = curr_load_addr = pi->purgatory_load_addr;
        bss_addr = load_addr + buf_sz + bss_pad;

        for (i = 0; i < pi->ehdr->e_shnum; i++) {
                if (!(sechdrs[i].sh_flags & SHF_ALLOC))
                        continue;

                align = sechdrs[i].sh_addralign;
                if (sechdrs[i].sh_type != SHT_NOBITS) {
                        curr_load_addr = ALIGN(curr_load_addr, align);
                        offset = curr_load_addr - load_addr;
                        /* We already modified ->sh_offset to keep src addr */
                        src = (char *) sechdrs[i].sh_offset;
                        memcpy(buf_addr + offset, src, sechdrs[i].sh_size);

                        /* Store load address and source address of section */
                        sechdrs[i].sh_addr = curr_load_addr;

                        /*
                         * This section got copied to temporary buffer. Update
                         * ->sh_offset accordingly.
                         */
                        sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);

                        /* Advance to the next address */
                        curr_load_addr += sechdrs[i].sh_size;
                } else {
                        bss_addr = ALIGN(bss_addr, align);
                        sechdrs[i].sh_addr = bss_addr;
                        bss_addr += sechdrs[i].sh_size;
                }
        }

        /* Update entry point based on load address of text section */
        if (entry_sidx >= 0)
                entry += sechdrs[entry_sidx].sh_addr;

        /* Make kernel jump to purgatory after shutdown */
        image->start = entry;

        /* Used later to get/set symbol values */
        pi->sechdrs = sechdrs;

        /*
         * Used later to identify which section is purgatory and skip it
         * from checksumming.
         */
        pi->purgatory_buf = purgatory_buf;
        return ret;
out:
        vfree(sechdrs);
        vfree(purgatory_buf);
        return ret;
}
static int kexec_apply_relocations(struct kimage *image)
{
        int i, ret;
        struct purgatory_info *pi = &image->purgatory_info;
        Elf_Shdr *sechdrs = pi->sechdrs;

        /* Apply relocations */
        for (i = 0; i < pi->ehdr->e_shnum; i++) {
                Elf_Shdr *section, *symtab;

                if (sechdrs[i].sh_type != SHT_RELA &&
                    sechdrs[i].sh_type != SHT_REL)
                        continue;

                /*
                 * For section of type SHT_RELA/SHT_REL,
                 * ->sh_link contains section header index of associated
                 * symbol table. And ->sh_info contains section header
                 * index of section to which relocations apply.
                 */
                if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
                    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
                        return -ENOEXEC;

                section = &sechdrs[sechdrs[i].sh_info];
                symtab = &sechdrs[sechdrs[i].sh_link];

                if (!(section->sh_flags & SHF_ALLOC))
                        continue;

                /*
                 * symtab->sh_link contains section header index of associated
                 * string table.
                 */
                if (symtab->sh_link >= pi->ehdr->e_shnum)
                        /* Invalid section number? */
                        continue;

                /*
                 * Respective architecture needs to provide support for applying
                 * relocations of type SHT_RELA/SHT_REL.
                 */
                if (sechdrs[i].sh_type == SHT_RELA)
                        ret = arch_kexec_apply_relocations_add(pi->ehdr,
                                                               sechdrs, i);
                else if (sechdrs[i].sh_type == SHT_REL)
                        ret = arch_kexec_apply_relocations(pi->ehdr,
                                                           sechdrs, i);
                if (ret)
                        return ret;
        }

        return 0;
}
2536 int kexec_load_purgatory(struct kimage
*image
, unsigned long min
,
2537 unsigned long max
, int top_down
,
2538 unsigned long *load_addr
)
2540 struct purgatory_info
*pi
= &image
->purgatory_info
;
2543 if (kexec_purgatory_size
<= 0)
2546 if (kexec_purgatory_size
< sizeof(Elf_Ehdr
))
2549 pi
->ehdr
= (Elf_Ehdr
*)kexec_purgatory
;
2551 if (memcmp(pi
->ehdr
->e_ident
, ELFMAG
, SELFMAG
) != 0
2552 || pi
->ehdr
->e_type
!= ET_REL
2553 || !elf_check_arch(pi
->ehdr
)
2554 || pi
->ehdr
->e_shentsize
!= sizeof(Elf_Shdr
))
2557 if (pi
->ehdr
->e_shoff
>= kexec_purgatory_size
2558 || (pi
->ehdr
->e_shnum
* sizeof(Elf_Shdr
) >
2559 kexec_purgatory_size
- pi
->ehdr
->e_shoff
))
2562 ret
= __kexec_load_purgatory(image
, min
, max
, top_down
);
2566 ret
= kexec_apply_relocations(image
);
2570 *load_addr
= pi
->purgatory_load_addr
;
2574 vfree(pi
->purgatory_buf
);
2578 static Elf_Sym
*kexec_purgatory_find_symbol(struct purgatory_info
*pi
,
2587 if (!pi
->sechdrs
|| !pi
->ehdr
)
2590 sechdrs
= pi
->sechdrs
;
2593 for (i
= 0; i
< ehdr
->e_shnum
; i
++) {
2594 if (sechdrs
[i
].sh_type
!= SHT_SYMTAB
)
2597 if (sechdrs
[i
].sh_link
>= ehdr
->e_shnum
)
2598 /* Invalid strtab section number */
2600 strtab
= (char *)sechdrs
[sechdrs
[i
].sh_link
].sh_offset
;
2601 syms
= (Elf_Sym
*)sechdrs
[i
].sh_offset
;
2603 /* Go through symbols for a match */
2604 for (k
= 0; k
< sechdrs
[i
].sh_size
/sizeof(Elf_Sym
); k
++) {
2605 if (ELF_ST_BIND(syms
[k
].st_info
) != STB_GLOBAL
)
2608 if (strcmp(strtab
+ syms
[k
].st_name
, name
) != 0)
2611 if (syms
[k
].st_shndx
== SHN_UNDEF
||
2612 syms
[k
].st_shndx
>= ehdr
->e_shnum
) {
2613 pr_debug("Symbol: %s has bad section index %d.\n",
2614 name
, syms
[k
].st_shndx
);
2618 /* Found the symbol we are looking for */
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
        struct purgatory_info *pi = &image->purgatory_info;
        Elf_Sym *sym;
        Elf_Shdr *sechdr;

        sym = kexec_purgatory_find_symbol(pi, name);
        if (!sym)
                return ERR_PTR(-EINVAL);

        sechdr = &pi->sechdrs[sym->st_shndx];

        /*
         * Returns the address where symbol will finally be loaded after
         * kexec_load_segment()
         */
        return (void *)(sechdr->sh_addr + sym->st_value);
}
/*
 * Get or set value of a symbol. If "get_value" is true, symbol value is
 * returned in buf otherwise symbol value is set based on value in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
                                   void *buf, unsigned int size, bool get_value)
{
        Elf_Sym *sym;
        Elf_Shdr *sechdrs;
        struct purgatory_info *pi = &image->purgatory_info;
        char *sym_buf;

        sym = kexec_purgatory_find_symbol(pi, name);
        if (!sym)
                return -EINVAL;

        if (sym->st_size != size) {
                pr_err("symbol %s size mismatch: expected %lu actual %u\n",
                       name, (unsigned long)sym->st_size, size);
                return -EINVAL;
        }

        sechdrs = pi->sechdrs;

        if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
                pr_err("symbol %s is in a bss section. Cannot %s\n", name,
                       get_value ? "get" : "set");
                return -EINVAL;
        }

        sym_buf = (char *)sechdrs[sym->st_shndx].sh_offset +
                                        sym->st_value;

        if (get_value)
                memcpy((void *)buf, sym_buf, size);
        else
                memcpy((void *)sym_buf, buf, size);

        return 0;
}
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
        int error = 0;

        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
                goto Unlock;
        }

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                lock_system_sleep();
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
                        error = -EBUSY;
                        goto Restore_console;
                }
                suspend_console();
                error = dpm_suspend_start(PMSG_FREEZE);
                if (error)
                        goto Resume_console;
                /* At this point, dpm_suspend_start() has been called,
                 * but *not* dpm_suspend_end(). We *must* call
                 * dpm_suspend_end() now.  Otherwise, drivers for
                 * some devices (e.g. interrupt controllers) become
                 * desynchronized with the actual state of the
                 * hardware at resume time, and evil weirdness ensues.
                 */
                error = dpm_suspend_end(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
                error = disable_nonboot_cpus();
                if (error)
                        goto Enable_cpus;
                local_irq_disable();
                error = syscore_suspend();
                if (error)
                        goto Enable_irqs;
        } else
#endif
        {
                kexec_in_progress = true;
                kernel_restart_prepare(NULL);
                migrate_to_reboot_cpu();

                /*
                 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
                 * no further code needs to use CPU hotplug (which is true in
                 * the reboot case). However, the kexec path depends on using
                 * CPU hotplug again; so re-enable it here.
                 */
                cpu_hotplug_enable();
                pr_emerg("Starting new kernel\n");
                machine_shutdown();
        }

        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                syscore_resume();
 Enable_irqs:
                local_irq_enable();
 Enable_cpus:
                enable_nonboot_cpus();
                dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
                dpm_resume_end(PMSG_RESTORE);
 Resume_console:
                resume_console();
 Restore_console:
                pm_restore_console();
                unlock_system_sleep();
        }
#endif

 Unlock:
        mutex_unlock(&kexec_mutex);
        return error;
}