/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
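
/*
 * relocate_kernel[] is the relocation routine implemented in assembly
 * (presumably relocate_kernel64.S in this tree). machine_kexec_prepare()
 * copies it into the control page and __do_machine_kexec() branches to
 * that copy.
 */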
#ifdef CONFIG_CRASH_DUMP
/*
 * Create ELF notes for one CPU
 */
static void add_elf_notes(int cpu)
{
	struct save_area *sa = (void *) 4608 + store_prefix();
	void *ptr;

	memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
	ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
	ptr = fill_cpu_elf_notes(ptr, sa, NULL);
	memset(ptr, 0, sizeof(struct elf_note));
}
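
/*
 * Note: 4608 is 0x1200, which matches SAVE_AREA_BASE, the offset of the
 * 64-bit store-status register save area within a prefix page; sa above
 * therefore points at the save area of the currently set prefix page.
 */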
/*
 * Initialize CPU ELF notes
 */
static void setup_regs(void)
{
	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
	struct _lowcore *lc;
	int cpu, this_cpu;

	/* Get lowcore pointer from store status of this CPU (absolute zero) */
	lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
	this_cpu = smp_find_processor_id(stap());
	add_elf_notes(this_cpu);
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;
		add_elf_notes(cpu);
	}
	if (MACHINE_HAS_VX)
		save_vx_regs_safe((void *) lc->vector_save_area_addr);
	/* Copy dump CPU store status info to absolute zero */
	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
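
/*
 * Each remaining online CPU is asked to store its status via
 * smp_store_status() before its ELF notes are generated; CPUs for which
 * the store fails are skipped. The dump CPU's own status is expected to
 * have been stored already (see the comment above __do_machine_kdump()).
 */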
/*
 * PM notifier callback for kdump
 */
static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
			       void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		if (crashk_res.start)
			crash_map_reserved_pages();
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		if (crashk_res.start)
			crash_unmap_reserved_pages();
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}
static int __init machine_kdump_pm_init(void)
{
	pm_notifier(machine_kdump_pm_cb, 0);
	return 0;
}
arch_initcall(machine_kdump_pm_init);
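
/*
 * The crashkernel region is normally kept unmapped so the loaded kdump
 * image cannot be corrupted; the notifier above maps it while a
 * suspend/hibernation transition is in progress so the region can be
 * handled like the rest of memory, and unmaps it again afterwards.
 */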
/*
 * Start kdump: We expect here that a store status has been done on our CPU
 */
static void __do_machine_kdump(void *image)
{
	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;

	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
	start_kdump(1);
}
#endif
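
/*
 * Purgatory's entry point (image->start) is called with argument 1 above
 * to actually start the kdump kernel; kdump_csum_valid() below calls the
 * same entry point with argument 0, which only verifies the checksums.
 */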
/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static int kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)image->start;
	int rc;

	__arch_local_irq_stnsm(0xfb); /* disable DAT */
	rc = start_kdump(0);
	__arch_local_irq_stosm(0x04); /* enable DAT */
	return rc ? 0 : -EINVAL;
#else
	return -EINVAL;
#endif
}
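
/*
 * The checksum check runs with DAT disabled because purgatory expects to
 * be entered with physical addressing; parameter 0 tells purgatory to
 * only verify the checksums (sha256 digests in kexec-tools' purgatory) of
 * the loaded segments instead of starting the kdump kernel.
 */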
/*
 * Map or unmap crashkernel memory
 */
static void crash_map_pages(int enable)
{
	unsigned long size = resource_size(&crashk_res);

	BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
	       size % KEXEC_CRASH_MEM_ALIGN);
	if (enable)
		vmem_add_mapping(crashk_res.start, size);
	else {
		vmem_remove_mapping(crashk_res.start, size);
		if (size)
			os_info_crashkernel_add(crashk_res.start, size);
		else
			os_info_crashkernel_add(0, 0);
	}
}
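
/*
 * os_info_crashkernel_add() records the crashkernel base and size in the
 * os_info block so a later dump kernel can locate the region; (0, 0)
 * clears the entry when no crash kernel memory is reserved.
 */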
/*
 * Map crashkernel memory
 */
void crash_map_reserved_pages(void)
{
	crash_map_pages(1);
}

/*
 * Unmap crashkernel memory
 */
void crash_unmap_reserved_pages(void)
{
	crash_map_pages(0);
}
/*
 * Give back memory to hypervisor before new kdump is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crashk_res.start),
			     PFN_DOWN(crashk_res.end - crashk_res.start + 1));
	return 0;
#else
	return -EINVAL;
#endif
}
int machine_kexec_prepare(struct kimage *image)
{
	void *reboot_code_buffer;

	/* Can't replace kernel image since it is read-only. */
	if (ipl_flags & IPL_NSS_VALID)
		return -EOPNOTSUPP;

	if (image->type == KEXEC_TYPE_CRASH)
		return machine_kexec_prepare_kdump();

	/* We don't support anything but the default image type for now. */
	if (image->type != KEXEC_TYPE_DEFAULT)
		return -EINVAL;

	/* Get the destination where the assembler code should be copied to.*/
	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

	/* Then copy it */
	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
	return 0;
}
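
/*
 * The control_code_page is a page reserved by the kexec core that will
 * not be overwritten while the new image is moved into place; copying
 * relocate_kernel there lets the relocation code keep running (with DAT
 * off) once the old kernel's mappings are no longer usable.
 */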
void machine_kexec_cleanup(struct kimage *image)
{
}

void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(lowcore_ptr);
	VMCOREINFO_SYMBOL(high_memory);
	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}
void machine_shutdown(void)
{
}

void machine_crash_shutdown(struct pt_regs *regs)
{
}
/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
	relocate_kernel_t data_mover;
	struct kimage *image = data;

	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	/* Call the moving routine */
	(*data_mover)(&image->head, image->start);
}
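
/*
 * data_mover is the physical address of the relocate_kernel copy made in
 * machine_kexec_prepare(); it walks the kimage entry list starting at
 * &image->head, moves the new kernel's pages into place and finally
 * branches to image->start.
 */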
/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
		s390_reset_system(setup_regs, __do_machine_kdump, data);
	} else
#endif
		s390_reset_system(NULL, __do_machine_kexec, data);
	disabled_wait((unsigned long) __builtin_return_address(0));
}
/*
 * Do either kdump or normal kexec. In case of kdump we first ask
 * purgatory whether the kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;
	tracer_disable();
	smp_send_stop();
	smp_call_ipl_cpu(__machine_kexec, image);
}
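
/*
 * smp_call_ipl_cpu() makes sure __machine_kexec() runs on the IPL CPU,
 * which the s390 reset and re-IPL code expects.
 */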