/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/suspend.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/lgr.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>

typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;

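/*
 * Note: relocate_kernel is a small, position independent assembler routine
 * that machine_kexec_prepare() below copies into the kexec control page.
 * When invoked through the relocate_kernel_t pointer it walks the kimage
 * entry list, moves the new kernel to its final location and branches to the
 * image entry point, roughly:
 *
 *      reloc = (relocate_kernel_t) page_to_phys(image->control_code_page);
 *      reloc(&image->head, image->start);      (does not return)
 */
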
#ifdef CONFIG_CRASH_DUMP

/*
 * Create ELF notes for one CPU
 */
static void add_elf_notes(int cpu)
{
        struct save_area *sa = (void *) 4608 + store_prefix();
        void *ptr;

        memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
        ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
        ptr = fill_cpu_elf_notes(ptr, sa);
        memset(ptr, 0, sizeof(struct elf_note));
}

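/*
 * The constant 4608 (0x1200) above is SAVE_AREA_BASE on 64 bit: the offset of
 * the store status register save area within a CPU's prefix (lowcore) page.
 * Adding store_prefix() therefore yields the absolute address of the current
 * CPU's save area.
 */
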
/*
 * Initialize CPU ELF notes
 */
static void setup_regs(void)
{
        unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
        int cpu, this_cpu;

        this_cpu = smp_find_processor_id(stap());
        add_elf_notes(this_cpu);
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                if (smp_store_status(cpu))
                        continue;
                add_elf_notes(cpu);
        }
        /* Copy dump CPU store status info to absolute zero */
        memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}

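/*
 * In short: setup_regs() collects the register state of the dump CPU and of
 * every other online CPU (via SIGP store status) into the per-cpu crash_notes
 * ELF note buffers, and finally copies the dump CPU's save area to absolute
 * lowcore so the kdump kernel and dump tools can find it at a fixed address.
 */
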
/*
 * PM notifier callback for kdump
 */
static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
                               void *ptr)
{
        switch (action) {
        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                if (crashk_res.start)
                        crash_map_reserved_pages();
                break;
        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                if (crashk_res.start)
                        crash_unmap_reserved_pages();
                break;
        default:
                return NOTIFY_DONE;
        }
        return NOTIFY_OK;
}

static int __init machine_kdump_pm_init(void)
{
        pm_notifier(machine_kdump_pm_cb, 0);
        return 0;
}
arch_initcall(machine_kdump_pm_init);
#endif

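/*
 * Rationale (assumed from the calls above): the crashkernel region is kept
 * unmapped while idle (see crash_map_pages() below), so it has to be mapped
 * for the duration of suspend/hibernation image handling and unmapped again
 * afterwards; crashk_res.start is only non-zero when a crashkernel area was
 * actually reserved.
 */
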
/*
 * Start kdump: We expect here that a store status has been done on our CPU
 */
static void __do_machine_kdump(void *image)
{
#ifdef CONFIG_CRASH_DUMP
        int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;

        setup_regs();
        __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
        start_kdump(1);
#endif
}

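/*
 * __do_machine_kdump() is invoked via s390_reset_system() (see
 * __machine_kexec() below). The PSW mask loaded above switches to a defined
 * addressing mode with DAT off, and start_kdump(1) enters the purgatory code
 * of the pre-loaded kdump image with parameter 1, which boots the kdump
 * kernel.
 */
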
/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static int kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
        int (*start_kdump)(int) = (void *)image->start;
        int rc;

        __arch_local_irq_stnsm(0xfb); /* disable DAT */
        rc = start_kdump(0);
        __arch_local_irq_stosm(0x04); /* enable DAT */
        return rc ? 0 : -EINVAL;
#else
        return -EINVAL;
#endif
}

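/*
 * The same purgatory entry point is called here with parameter 0, which only
 * verifies the checksums over the loaded kdump segments and returns. DAT is
 * switched off around the call since purgatory runs on absolute addresses
 * (the crashkernel region is normally not mapped). A non-zero return code
 * means the image is intact, hence "rc ? 0 : -EINVAL".
 */
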
/*
 * Map or unmap crashkernel memory
 */
static void crash_map_pages(int enable)
{
        unsigned long size = resource_size(&crashk_res);

        BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
               size % KEXEC_CRASH_MEM_ALIGN);
        if (enable)
                vmem_add_mapping(crashk_res.start, size);
        else {
                vmem_remove_mapping(crashk_res.start, size);
                if (size)
                        os_info_crashkernel_add(crashk_res.start, size);
                else
                        os_info_crashkernel_add(0, 0);
        }
}

/*
 * Map crashkernel memory
 */
void crash_map_reserved_pages(void)
{
        crash_map_pages(1);
}

/*
 * Unmap crashkernel memory
 */
void crash_unmap_reserved_pages(void)
{
        crash_map_pages(0);
}

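/*
 * The crashkernel region stays outside the kernel's 1:1 mapping while it is
 * not being written; the kexec core calls crash_map_reserved_pages() and
 * crash_unmap_reserved_pages() around loading a crash kernel, which protects
 * the pre-loaded kdump image from stray writes. os_info_crashkernel_add()
 * records the region in the os_info block so that the next kernel and the
 * dump tools can locate it.
 */
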
/*
 * Give back memory to hypervisor before new kdump is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
        if (MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crashk_res.start),
                             PFN_DOWN(crashk_res.end - crashk_res.start + 1));
        return 0;
#else
        return -EINVAL;
#endif
}

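/*
 * diag10_range() issues DIAGNOSE 0x10 ("release pages") under z/VM, telling
 * the hypervisor that the contents of the crashkernel page frames are
 * currently irrelevant, so the backing memory can be reclaimed until a new
 * kdump image is loaded.
 */
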
int machine_kexec_prepare(struct kimage *image)
{
        void *reboot_code_buffer;

        /* Can't replace kernel image since it is read-only. */
        if (ipl_flags & IPL_NSS_VALID)
                return -EOPNOTSUPP;

        if (image->type == KEXEC_TYPE_CRASH)
                return machine_kexec_prepare_kdump();

        /* We don't support anything but the default image type for now. */
        if (image->type != KEXEC_TYPE_DEFAULT)
                return -EINVAL;

        /* Get the destination where the assembler code should be copied to. */
        reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

        /* Then copy it */
        memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
        return 0;
}

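/*
 * A rough sketch of how this path is reached (not part of this file): the
 * kexec_load() system call copies the image segments and allocates
 * image->control_code_page; machine_kexec_prepare() then plants the
 * relocate_kernel code in that page so it is still available after the
 * system reset that precedes the final jump.
 */
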
void machine_kexec_cleanup(struct kimage *image)
{
}

void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_SYMBOL(lowcore_ptr);
        VMCOREINFO_SYMBOL(high_memory);
        VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}

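/*
 * The VMCOREINFO entries exported here end up in the vmcoreinfo ELF note of
 * /proc/vmcore in the kdump kernel; tools such as makedumpfile and crash use
 * lowcore_ptr and its length to locate the per-CPU lowcores in the dump.
 */
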
void machine_shutdown(void)
{
}

void machine_crash_shutdown(struct pt_regs *regs)
{
}

/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
        relocate_kernel_t data_mover;
        struct kimage *image = data;

        data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

        /* Call the moving routine */
        (*data_mover)(&image->head, image->start);
}

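/*
 * The "data mover" is the relocate_kernel code that machine_kexec_prepare()
 * copied into the control page. It interprets the image->head entry list
 * (destination, indirection, source and done entries), copies the new kernel
 * to its load address and finally branches to image->start.
 */
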
/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
        struct kimage *image = data;

        __arch_local_irq_stosm(0x04); /* enable DAT */
        pfault_fini();
        tracing_off();
        debug_locks_off();
        if (image->type == KEXEC_TYPE_CRASH) {
                lgr_info_log();
                s390_reset_system(__do_machine_kdump, data);
        } else
                s390_reset_system(__do_machine_kexec, data);
        disabled_wait((unsigned long) __builtin_return_address(0));
}

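/*
 * s390_reset_system() (assumed behaviour, implemented elsewhere) runs the
 * registered reset handlers, sets up a fresh stack and then calls the given
 * function with the given argument. The callback is not expected to return,
 * so reaching disabled_wait() means the kexec attempt failed.
 */
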
/*
 * Do either kdump or normal kexec. In case of kdump we first ask
 * purgatory, if kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
        if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
                return;
        tracer_disable();
        smp_send_stop();
        smp_call_ipl_cpu(__machine_kexec, image);
}

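/*
 * smp_send_stop() parks all other CPUs first; smp_call_ipl_cpu() then runs
 * __machine_kexec() on the CPU the system was IPLed from, which the reset
 * and restart sequence is assumed to require.
 */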