/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>

typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
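
/*
 * relocate_kernel is the assembler relocation routine: machine_kexec_prepare()
 * copies relocate_kernel_len bytes of it into the kexec control page, and
 * __do_machine_kexec() later branches to that copy to move the new kernel
 * segments into place and start the new image.
 */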

#ifdef CONFIG_CRASH_DUMP

/*
 * Create ELF notes for one CPU
 */
static void add_elf_notes(int cpu)
{
	/* 4608 == 0x1200 == SAVE_AREA_BASE: offset of the store status save area */
	struct save_area *sa = (void *) 4608 + store_prefix();
	void *ptr;

	memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
	ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
	ptr = fill_cpu_elf_notes(ptr, sa, NULL);
	memset(ptr, 0, sizeof(struct elf_note));
}
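
/*
 * Note: setup_regs() below runs on the CPU that performs the kdump IPL.
 * It triggers a SIGP store status for every other online CPU and fills in
 * the per-cpu crash_notes ELF notes (add_elf_notes()), which later show up
 * in /proc/vmcore of the kdump kernel.
 */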

/*
 * Initialize CPU ELF notes
 */
static void setup_regs(void)
{
	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
	struct _lowcore *lc;
	int this_cpu, cpu;

	/* Get lowcore pointer from store status of this CPU (absolute zero) */
	lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
	this_cpu = smp_find_processor_id(stap());
	add_elf_notes(this_cpu);
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;
		add_elf_notes(cpu);
	}
	if (MACHINE_HAS_VX)
		save_vx_regs_safe((void *) lc->vector_save_area_addr);
	/* Copy dump CPU store status info to absolute zero */
	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
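
/*
 * The crashkernel region is normally kept out of the kernel mapping (see
 * crash_map_pages() below) so a pre-loaded kdump image cannot be corrupted
 * by stray writes. Suspend and hibernation need the region to be
 * accessible, so it is temporarily mapped around those transitions.
 */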

/*
 * PM notifier callback for kdump
 */
static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
			       void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		if (crashk_res.start)
			crash_map_reserved_pages();
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		if (crashk_res.start)
			crash_unmap_reserved_pages();
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static int __init machine_kdump_pm_init(void)
{
	pm_notifier(machine_kdump_pm_cb, 0);
	return 0;
}
arch_initcall(machine_kdump_pm_init);
#endif
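
/*
 * Calling convention for the purgatory entry point loaded by kexec-tools:
 * parameter 1 actually starts the kdump kernel (__do_machine_kdump()),
 * parameter 0 only verifies the checksums (kdump_csum_valid()).
 */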

/*
 * Start kdump: We expect here that a store status has been done on our CPU
 */
static void __do_machine_kdump(void *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;

	setup_regs();
	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
	start_kdump(1);
#endif
}
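
/*
 * Note: the checksum run below is done with DAT switched off, since the
 * purgatory code is entered at its physical address and does not set up
 * any address translation of its own.
 */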

/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static int kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)image->start;
	int rc;

	__arch_local_irq_stnsm(0xfb); /* disable DAT */
	rc = start_kdump(0);
	__arch_local_irq_stosm(0x04); /* enable DAT */
	return rc ? 0 : -EINVAL;
#else
	return -EINVAL;
#endif
}
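
/*
 * crash_map_pages() adds or removes the kernel mapping of the crashkernel
 * region. When the region is unmapped, its current location is also
 * recorded in the os_info block (os_info_crashkernel_add()), which is how
 * s390 passes such data on to a later dump or IPL kernel.
 */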

/*
 * Map or unmap crashkernel memory
 */
static void crash_map_pages(int enable)
{
	unsigned long size = resource_size(&crashk_res);

	BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
	       size % KEXEC_CRASH_MEM_ALIGN);
	if (enable)
		vmem_add_mapping(crashk_res.start, size);
	else {
		vmem_remove_mapping(crashk_res.start, size);
		if (size)
			os_info_crashkernel_add(crashk_res.start, size);
		else
			os_info_crashkernel_add(0, 0);
	}
}

/*
 * Map crashkernel memory
 */
void crash_map_reserved_pages(void)
{
	crash_map_pages(1);
}

/*
 * Unmap crashkernel memory
 */
void crash_unmap_reserved_pages(void)
{
	crash_map_pages(0);
}
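
/*
 * crash_map_reserved_pages() and crash_unmap_reserved_pages() override the
 * weak default implementations in the common kexec code, which calls them
 * while a crash kernel image is loaded or unloaded; the PM notifier above
 * uses them as well.
 */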

/*
 * Give back memory to hypervisor before new kdump is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crashk_res.start),
			     PFN_DOWN(crashk_res.end - crashk_res.start + 1));
	return 0;
#else
	return -EINVAL;
#endif
}
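
/*
 * The diag10_range() call above issues DIAGNOSE 0x10 ("release pages") so
 * that z/VM can reclaim the backing storage of the crashkernel region
 * before a new kdump image is copied into it; it is therefore only done
 * when running as a z/VM guest (MACHINE_IS_VM).
 */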

int machine_kexec_prepare(struct kimage *image)
{
	void *reboot_code_buffer;

	/* Can't replace kernel image since it is read-only. */
	if (ipl_flags & IPL_NSS_VALID)
		return -EOPNOTSUPP;

	if (image->type == KEXEC_TYPE_CRASH)
		return machine_kexec_prepare_kdump();

	/* We don't support anything but the default image type for now. */
	if (image->type != KEXEC_TYPE_DEFAULT)
		return -EINVAL;

	/* Get the destination where the assembler code should be copied to. */
	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

	/* Then copy it */
	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(lowcore_ptr);
	VMCOREINFO_SYMBOL(high_memory);
	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}
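
/*
 * The symbols above are exported through the vmcoreinfo ELF note so that
 * dump analysis tools (e.g. crash or makedumpfile) can locate the lowcore
 * pointer array and the top of memory in the resulting /proc/vmcore.
 */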

void machine_shutdown(void)
{
}

void machine_crash_shutdown(struct pt_regs *regs)
{
}

/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
	relocate_kernel_t data_mover;
	struct kimage *image = data;

	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	/* Call the moving routine */
	(*data_mover)(&image->head, image->start);
}
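
/*
 * Note: __machine_kexec() below is run on the IPL CPU (see machine_kexec()).
 * s390_reset_system() brings the machine into a defined (reset) state and
 * then calls the function passed to it; disabled_wait() stops the CPU
 * should that hand-over ever return.
 */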

/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	struct kimage *image = data;

	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
	if (image->type == KEXEC_TYPE_CRASH) {
		lgr_info_log();
		s390_reset_system(__do_machine_kdump, data);
	} else
		s390_reset_system(__do_machine_kexec, data);
	disabled_wait((unsigned long) __builtin_return_address(0));
}

/*
 * Do either kdump or normal kexec. In case of kdump we first ask
 * purgatory whether the kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;
	tracer_disable();
	smp_send_stop();
	smp_call_ipl_cpu(__machine_kexec, image);
}