/*
 * arch/s390/kernel/machine_kexec.c
 *
 * Copyright IBM Corp. 2005,2011
 *
 * Author(s): Rolf Adelsberger,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/asm-offsets.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
#ifdef CONFIG_CRASH_DUMP

void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
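
/*
 * Note: the constant 4608 (0x1200) used below is the offset of the
 * register save area in the 64-bit lowcore, i.e. the location where the
 * SIGP "store status" order deposits a CPU's registers (presumably the
 * same value as SAVE_AREA_BASE used further down in setup_regs()).
 */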
/*
 * Create ELF notes for one CPU
 */
static void add_elf_notes(int cpu)
{
	struct save_area *sa = (void *) 4608 + store_prefix();
	void *ptr;

	memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
	ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
	ptr = fill_cpu_elf_notes(ptr, sa);
	memset(ptr, 0, sizeof(struct elf_note));
}
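
/*
 * The loop below signals each physical CPU address in turn with the SIGP
 * "stop and store status" order, retrying while the target is busy. A CPU
 * that accepted the order and stored a non-zero prefix register is taken
 * as the next usable physical CPU.
 */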
/*
 * Store status of next available physical CPU
 */
static int store_status_next(int start_cpu, int this_cpu)
{
	struct save_area *sa = (void *) 4608 + store_prefix();
	int cpu, rc;

	for (cpu = start_cpu; cpu < 65536; cpu++) {
		if (cpu == this_cpu)
			continue;
		do {
			rc = raw_sigp(cpu, sigp_stop_and_store_status);
		} while (rc == sigp_busy);
		if (rc != sigp_order_code_accepted)
			continue;
		if (sa->pref_reg)
			return cpu;
	}
	return -1;
}
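
/*
 * setup_regs() assumes that a store status has already been done for the
 * CPU running the dump (see __do_machine_kdump): the first online CPU
 * only gets its ELF notes created from that save area, while every other
 * online CPU is matched with the next physical CPU that responds to
 * "stop and store status".
 */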
/*
 * Initialize CPU ELF notes
 */
void setup_regs(void)
{
	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
	int cpu, this_cpu, phys_cpu = 0, first = 1;

	this_cpu = stap();

	if (!S390_lowcore.prefixreg_save_area)
		first = 0;
	for_each_online_cpu(cpu) {
		if (first) {
			add_elf_notes(cpu);
			first = 0;
			continue;
		}
		phys_cpu = store_status_next(phys_cpu, this_cpu);
		if (phys_cpu == -1)
			break;
		add_elf_notes(cpu);
		phys_cpu++;
	}
	/* Copy dump CPU store status info to absolute zero */
	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}

#endif
/*
 * Start kdump: We expect here that a store status has been done on our CPU
 */
static void __do_machine_kdump(void *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;

	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
	setup_regs();
	start_kdump(1);
#endif
}
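
/*
 * image->start is the entry point of the purgatory code loaded by
 * kexec-tools. Called with argument 0 it only verifies the checksums of
 * the loaded segments and returns; DAT is switched off around the call,
 * presumably because purgatory runs on physical addresses.
 */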
/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static int kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)image->start;
	int rc;

	__arch_local_irq_stnsm(0xfb); /* disable DAT */
	rc = start_kdump(0);
	__arch_local_irq_stosm(0x04); /* enable DAT */
	return rc ? 0 : -EINVAL;
#else
	return -EINVAL;
#endif
}
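
/*
 * The crashkernel region is normally kept out of the kernel's 1:1
 * mapping. The helpers below add it to or remove it from that mapping
 * via vmem_add_mapping()/vmem_remove_mapping(), presumably so the kexec
 * code can write the kdump image into the region while it is loaded.
 */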
/*
 * Map or unmap crashkernel memory
 */
static void crash_map_pages(int enable)
{
	unsigned long size = resource_size(&crashk_res);

	BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
	       size % KEXEC_CRASH_MEM_ALIGN);
	if (enable)
		vmem_add_mapping(crashk_res.start, size);
	else
		vmem_remove_mapping(crashk_res.start, size);
}

/*
 * Map crashkernel memory
 */
void crash_map_reserved_pages(void)
{
	crash_map_pages(1);
}

/*
 * Unmap crashkernel memory
 */
void crash_unmap_reserved_pages(void)
{
	crash_map_pages(0);
}

/*
 * Give back memory to hypervisor before new kdump is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crashk_res.start),
			     PFN_DOWN(crashk_res.end - crashk_res.start + 1));
	return 0;
#else
	return -EINVAL;
#endif
}
int machine_kexec_prepare(struct kimage *image)
{
	void *reboot_code_buffer;

	/* Can't replace kernel image since it is read-only. */
	if (ipl_flags & IPL_NSS_VALID)
		return -ENOSYS;

	if (image->type == KEXEC_TYPE_CRASH)
		return machine_kexec_prepare_kdump();

	/* We don't support anything but the default image type for now. */
	if (image->type != KEXEC_TYPE_DEFAULT)
		return -EINVAL;

	/* Get the destination where the assembler code should be copied to.*/
	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

	/* Then copy it */
	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(lowcore_ptr);
	VMCOREINFO_SYMBOL(high_memory);
	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}

void machine_shutdown(void)
{
}
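
/*
 * data_mover below is the physical address of the relocate_kernel
 * assembler routine that machine_kexec_prepare() copied into the control
 * page; it walks the image->head entry list, moves the new kernel into
 * place and then passes control to image->start.
 */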
/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
	relocate_kernel_t data_mover;
	struct kimage *image = data;

	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	/* Call the moving routine */
	(*data_mover)(&image->head, image->start);
}
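
/*
 * pfault_fini() terminates the pseudo-page-fault handshake with the
 * hypervisor and s390_reset_system() resets the I/O subsystem before
 * invoking the kdump or kexec entry function; disabled_wait() is a
 * safety net that should only be reached if that function returns.
 */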
/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	struct kimage *image = data;

	pfault_fini();
	if (image->type == KEXEC_TYPE_CRASH)
		s390_reset_system(__do_machine_kdump, data);
	else
		s390_reset_system(__do_machine_kexec, data);
	disabled_wait((unsigned long) __builtin_return_address(0));
}
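
/*
 * smp_send_stop() stops all other CPUs, and smp_switch_to_ipl_cpu()
 * reruns __machine_kexec on the CPU the system was IPLed from,
 * presumably because the reset and the start of the new kernel have to
 * happen on that CPU.
 */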
/*
 * Do either kdump or normal kexec. In case of kdump we first ask
 * purgatory, if kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;
	tracer_disable();
	smp_send_stop();
	smp_switch_to_ipl_cpu(__machine_kexec, image);
}