2 * S390 kdump implementation
4 * Copyright IBM Corp. 2011
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
8 #include <linux/crash_dump.h>
9 #include <asm/lowcore.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/gfp.h>
13 #include <linux/slab.h>
14 #include <linux/bootmem.h>
15 #include <linux/elf.h>
16 #include <asm/asm-offsets.h>
17 #include <linux/memblock.h>
18 #include <asm/os_info.h>
23 #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
24 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
25 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
27 static struct memblock_region oldmem_region
;
29 static struct memblock_type oldmem_type
= {
33 .regions
= &oldmem_region
,
37 struct list_head list
;
49 __vector128 vxrs_high
[16];
52 static LIST_HEAD(dump_save_areas
);
55 * Allocate a save area
57 struct save_area
* __init
save_area_alloc(bool is_boot_cpu
)
61 sa
= (void *) memblock_alloc(sizeof(*sa
), 8);
63 list_add(&sa
->list
, &dump_save_areas
);
65 list_add_tail(&sa
->list
, &dump_save_areas
);
70 * Return the address of the save area for the boot CPU
72 struct save_area
* __init
save_area_boot_cpu(void)
74 if (list_empty(&dump_save_areas
))
76 return list_first_entry(&dump_save_areas
, struct save_area
, list
);
80 * Copy CPU registers into the save area
82 void __init
save_area_add_regs(struct save_area
*sa
, void *regs
)
86 lc
= (struct lowcore
*)(regs
- __LC_FPREGS_SAVE_AREA
);
87 memcpy(&sa
->psw
, &lc
->psw_save_area
, sizeof(sa
->psw
));
88 memcpy(&sa
->ctrs
, &lc
->cregs_save_area
, sizeof(sa
->ctrs
));
89 memcpy(&sa
->gprs
, &lc
->gpregs_save_area
, sizeof(sa
->gprs
));
90 memcpy(&sa
->acrs
, &lc
->access_regs_save_area
, sizeof(sa
->acrs
));
91 memcpy(&sa
->fprs
, &lc
->floating_pt_save_area
, sizeof(sa
->fprs
));
92 memcpy(&sa
->fpc
, &lc
->fpt_creg_save_area
, sizeof(sa
->fpc
));
93 memcpy(&sa
->prefix
, &lc
->prefixreg_save_area
, sizeof(sa
->prefix
));
94 memcpy(&sa
->todpreg
, &lc
->tod_progreg_save_area
, sizeof(sa
->todpreg
));
95 memcpy(&sa
->timer
, &lc
->cpu_timer_save_area
, sizeof(sa
->timer
));
96 memcpy(&sa
->todcmp
, &lc
->clock_comp_save_area
, sizeof(sa
->todcmp
));
100 * Copy vector registers into the save area
102 void __init
save_area_add_vxrs(struct save_area
*sa
, __vector128
*vxrs
)
106 /* Copy lower halves of vector registers 0-15 */
107 for (i
= 0; i
< 16; i
++)
108 memcpy(&sa
->vxrs_low
[i
], &vxrs
[i
].u
[2], 8);
109 /* Copy vector registers 16-31 */
110 memcpy(sa
->vxrs_high
, vxrs
+ 16, 16 * sizeof(__vector128
));
114 * Return physical address for virtual address
116 static inline void *load_real_addr(void *addr
)
118 unsigned long real_addr
;
125 : "=a" (real_addr
) : "a" (addr
) : "cc");
126 return (void *)real_addr
;
130 * Copy memory of the old, dumped system to a kernel space virtual address
132 int copy_oldmem_kernel(void *dst
, void *src
, size_t count
)
134 unsigned long from
, len
;
140 if (!OLDMEM_BASE
&& from
< sclp
.hsa_size
) {
141 /* Copy from zfcpdump HSA area */
142 len
= min(count
, sclp
.hsa_size
- from
);
143 rc
= memcpy_hsa_kernel(dst
, from
, len
);
147 /* Check for swapped kdump oldmem areas */
148 if (OLDMEM_BASE
&& from
- OLDMEM_BASE
< OLDMEM_SIZE
) {
150 len
= min(count
, OLDMEM_SIZE
- from
);
151 } else if (OLDMEM_BASE
&& from
< OLDMEM_SIZE
) {
152 len
= min(count
, OLDMEM_SIZE
- from
);
157 if (is_vmalloc_or_module_addr(dst
)) {
158 ra
= load_real_addr(dst
);
159 len
= min(PAGE_SIZE
- offset_in_page(ra
), len
);
163 if (memcpy_real(ra
, (void *) from
, len
))
174 * Copy memory of the old, dumped system to a user space virtual address
176 int copy_oldmem_user(void __user
*dst
, void *src
, size_t count
)
178 unsigned long from
, len
;
183 if (!OLDMEM_BASE
&& from
< sclp
.hsa_size
) {
184 /* Copy from zfcpdump HSA area */
185 len
= min(count
, sclp
.hsa_size
- from
);
186 rc
= memcpy_hsa_user(dst
, from
, len
);
190 /* Check for swapped kdump oldmem areas */
191 if (OLDMEM_BASE
&& from
- OLDMEM_BASE
< OLDMEM_SIZE
) {
193 len
= min(count
, OLDMEM_SIZE
- from
);
194 } else if (OLDMEM_BASE
&& from
< OLDMEM_SIZE
) {
195 len
= min(count
, OLDMEM_SIZE
- from
);
200 rc
= copy_to_user_real(dst
, (void *) from
, count
);
212 * Copy one page from "oldmem"
214 ssize_t
copy_oldmem_page(unsigned long pfn
, char *buf
, size_t csize
,
215 unsigned long offset
, int userbuf
)
222 src
= (void *) (pfn
<< PAGE_SHIFT
) + offset
;
224 rc
= copy_oldmem_user((void __force __user
*) buf
, src
, csize
);
226 rc
= copy_oldmem_kernel((void *) buf
, src
, csize
);
231 * Remap "oldmem" for kdump
233 * For the kdump reserved memory this functions performs a swap operation:
234 * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
236 static int remap_oldmem_pfn_range_kdump(struct vm_area_struct
*vma
,
237 unsigned long from
, unsigned long pfn
,
238 unsigned long size
, pgprot_t prot
)
240 unsigned long size_old
;
243 if (pfn
< OLDMEM_SIZE
>> PAGE_SHIFT
) {
244 size_old
= min(size
, OLDMEM_SIZE
- (pfn
<< PAGE_SHIFT
));
245 rc
= remap_pfn_range(vma
, from
,
246 pfn
+ (OLDMEM_BASE
>> PAGE_SHIFT
),
248 if (rc
|| size
== size_old
)
252 pfn
+= size_old
>> PAGE_SHIFT
;
254 return remap_pfn_range(vma
, from
, pfn
, size
, prot
);
258 * Remap "oldmem" for zfcpdump
260 * We only map available memory above HSA size. Memory below HSA size
261 * is read on demand using the copy_oldmem_page() function.
263 static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct
*vma
,
266 unsigned long size
, pgprot_t prot
)
268 unsigned long hsa_end
= sclp
.hsa_size
;
269 unsigned long size_hsa
;
271 if (pfn
< hsa_end
>> PAGE_SHIFT
) {
272 size_hsa
= min(size
, hsa_end
- (pfn
<< PAGE_SHIFT
));
273 if (size
== size_hsa
)
277 pfn
+= size_hsa
>> PAGE_SHIFT
;
279 return remap_pfn_range(vma
, from
, pfn
, size
, prot
);
283 * Remap "oldmem" for kdump or zfcpdump
285 int remap_oldmem_pfn_range(struct vm_area_struct
*vma
, unsigned long from
,
286 unsigned long pfn
, unsigned long size
, pgprot_t prot
)
289 return remap_oldmem_pfn_range_kdump(vma
, from
, pfn
, size
, prot
);
291 return remap_oldmem_pfn_range_zfcpdump(vma
, from
, pfn
, size
,
296 * Alloc memory and panic in case of ENOMEM
298 static void *kzalloc_panic(int len
)
302 rc
= kzalloc(len
, GFP_KERNEL
);
304 panic("s390 kdump kzalloc (%d) failed", len
);
309 * Initialize ELF note
311 static void *nt_init_name(void *buf
, Elf64_Word type
, void *desc
, int d_len
,
317 note
= (Elf64_Nhdr
*)buf
;
318 note
->n_namesz
= strlen(name
) + 1;
319 note
->n_descsz
= d_len
;
321 len
= sizeof(Elf64_Nhdr
);
323 memcpy(buf
+ len
, name
, note
->n_namesz
);
324 len
= roundup(len
+ note
->n_namesz
, 4);
326 memcpy(buf
+ len
, desc
, note
->n_descsz
);
327 len
= roundup(len
+ note
->n_descsz
, 4);
329 return PTR_ADD(buf
, len
);
332 static inline void *nt_init(void *buf
, Elf64_Word type
, void *desc
, int d_len
)
334 return nt_init_name(buf
, type
, desc
, d_len
, KEXEC_CORE_NOTE_NAME
);
338 * Fill ELF notes for one CPU with save area registers
340 static void *fill_cpu_elf_notes(void *ptr
, int cpu
, struct save_area
*sa
)
342 struct elf_prstatus nt_prstatus
;
343 elf_fpregset_t nt_fpregset
;
345 /* Prepare prstatus note */
346 memset(&nt_prstatus
, 0, sizeof(nt_prstatus
));
347 memcpy(&nt_prstatus
.pr_reg
.gprs
, sa
->gprs
, sizeof(sa
->gprs
));
348 memcpy(&nt_prstatus
.pr_reg
.psw
, sa
->psw
, sizeof(sa
->psw
));
349 memcpy(&nt_prstatus
.pr_reg
.acrs
, sa
->acrs
, sizeof(sa
->acrs
));
350 nt_prstatus
.pr_pid
= cpu
;
351 /* Prepare fpregset (floating point) note */
352 memset(&nt_fpregset
, 0, sizeof(nt_fpregset
));
353 memcpy(&nt_fpregset
.fpc
, &sa
->fpc
, sizeof(sa
->fpc
));
354 memcpy(&nt_fpregset
.fprs
, &sa
->fprs
, sizeof(sa
->fprs
));
355 /* Create ELF notes for the CPU */
356 ptr
= nt_init(ptr
, NT_PRSTATUS
, &nt_prstatus
, sizeof(nt_prstatus
));
357 ptr
= nt_init(ptr
, NT_PRFPREG
, &nt_fpregset
, sizeof(nt_fpregset
));
358 ptr
= nt_init(ptr
, NT_S390_TIMER
, &sa
->timer
, sizeof(sa
->timer
));
359 ptr
= nt_init(ptr
, NT_S390_TODCMP
, &sa
->todcmp
, sizeof(sa
->todcmp
));
360 ptr
= nt_init(ptr
, NT_S390_TODPREG
, &sa
->todpreg
, sizeof(sa
->todpreg
));
361 ptr
= nt_init(ptr
, NT_S390_CTRS
, &sa
->ctrs
, sizeof(sa
->ctrs
));
362 ptr
= nt_init(ptr
, NT_S390_PREFIX
, &sa
->prefix
, sizeof(sa
->prefix
));
363 if (MACHINE_HAS_VX
) {
364 ptr
= nt_init(ptr
, NT_S390_VXRS_HIGH
,
365 &sa
->vxrs_high
, sizeof(sa
->vxrs_high
));
366 ptr
= nt_init(ptr
, NT_S390_VXRS_LOW
,
367 &sa
->vxrs_low
, sizeof(sa
->vxrs_low
));
373 * Initialize prpsinfo note (new kernel)
375 static void *nt_prpsinfo(void *ptr
)
377 struct elf_prpsinfo prpsinfo
;
379 memset(&prpsinfo
, 0, sizeof(prpsinfo
));
380 prpsinfo
.pr_sname
= 'R';
381 strcpy(prpsinfo
.pr_fname
, "vmlinux");
382 return nt_init(ptr
, NT_PRPSINFO
, &prpsinfo
, sizeof(prpsinfo
));
386 * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
388 static void *get_vmcoreinfo_old(unsigned long *size
)
390 char nt_name
[11], *vmcoreinfo
;
394 if (copy_oldmem_kernel(&addr
, &S390_lowcore
.vmcore_info
, sizeof(addr
)))
396 memset(nt_name
, 0, sizeof(nt_name
));
397 if (copy_oldmem_kernel(¬e
, addr
, sizeof(note
)))
399 if (copy_oldmem_kernel(nt_name
, addr
+ sizeof(note
),
400 sizeof(nt_name
) - 1))
402 if (strcmp(nt_name
, "VMCOREINFO") != 0)
404 vmcoreinfo
= kzalloc_panic(note
.n_descsz
);
405 if (copy_oldmem_kernel(vmcoreinfo
, addr
+ 24, note
.n_descsz
))
407 *size
= note
.n_descsz
;
412 * Initialize vmcoreinfo note (new kernel)
414 static void *nt_vmcoreinfo(void *ptr
)
419 vmcoreinfo
= os_info_old_entry(OS_INFO_VMCOREINFO
, &size
);
421 vmcoreinfo
= get_vmcoreinfo_old(&size
);
424 return nt_init_name(ptr
, 0, vmcoreinfo
, size
, "VMCOREINFO");
/*
 * Initialize ELF header (new kernel)
 *
 * Builds a 64-bit big-endian s390 core-file header with one program
 * header per memory chunk plus one for the notes segment, and returns
 * a pointer just past the header (where the phdrs go).
 */
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
	memset(ehdr, 0, sizeof(*ehdr));
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = EM_S390;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);
	/* +1 program header for the PT_NOTE segment */
	ehdr->e_phnum = mem_chunk_cnt + 1;
	return ehdr + 1;
}
449 * Return CPU count for ELF header (new kernel)
451 static int get_cpu_cnt(void)
453 struct save_area
*sa
;
456 list_for_each_entry(sa
, &dump_save_areas
, list
)
463 * Return memory chunk count for ELF header (new kernel)
465 static int get_mem_chunk_cnt(void)
470 for_each_mem_range(idx
, &memblock
.physmem
, &oldmem_type
, NUMA_NO_NODE
,
471 MEMBLOCK_NONE
, NULL
, NULL
, NULL
)
477 * Initialize ELF loads (new kernel)
479 static void loads_init(Elf64_Phdr
*phdr
, u64 loads_offset
)
481 phys_addr_t start
, end
;
484 for_each_mem_range(idx
, &memblock
.physmem
, &oldmem_type
, NUMA_NO_NODE
,
485 MEMBLOCK_NONE
, &start
, &end
, NULL
) {
486 phdr
->p_filesz
= end
- start
;
487 phdr
->p_type
= PT_LOAD
;
488 phdr
->p_offset
= start
;
489 phdr
->p_vaddr
= start
;
490 phdr
->p_paddr
= start
;
491 phdr
->p_memsz
= end
- start
;
492 phdr
->p_flags
= PF_R
| PF_W
| PF_X
;
493 phdr
->p_align
= PAGE_SIZE
;
499 * Initialize notes (new kernel)
501 static void *notes_init(Elf64_Phdr
*phdr
, void *ptr
, u64 notes_offset
)
503 struct save_area
*sa
;
504 void *ptr_start
= ptr
;
507 ptr
= nt_prpsinfo(ptr
);
510 list_for_each_entry(sa
, &dump_save_areas
, list
)
512 ptr
= fill_cpu_elf_notes(ptr
, cpu
++, sa
);
513 ptr
= nt_vmcoreinfo(ptr
);
514 memset(phdr
, 0, sizeof(*phdr
));
515 phdr
->p_type
= PT_NOTE
;
516 phdr
->p_offset
= notes_offset
;
517 phdr
->p_filesz
= (unsigned long) PTR_SUB(ptr
, ptr_start
);
518 phdr
->p_memsz
= phdr
->p_filesz
;
523 * Create ELF core header (new kernel)
525 int elfcorehdr_alloc(unsigned long long *addr
, unsigned long long *size
)
527 Elf64_Phdr
*phdr_notes
, *phdr_loads
;
533 /* If we are not in kdump or zfcpdump mode return */
534 if (!OLDMEM_BASE
&& ipl_info
.type
!= IPL_TYPE_FCP_DUMP
)
536 /* If we cannot get HSA size for zfcpdump return error */
537 if (ipl_info
.type
== IPL_TYPE_FCP_DUMP
&& !sclp
.hsa_size
)
540 /* For kdump, exclude previous crashkernel memory */
542 oldmem_region
.base
= OLDMEM_BASE
;
543 oldmem_region
.size
= OLDMEM_SIZE
;
544 oldmem_type
.total_size
= OLDMEM_SIZE
;
547 mem_chunk_cnt
= get_mem_chunk_cnt();
549 alloc_size
= 0x1000 + get_cpu_cnt() * 0x4a0 +
550 mem_chunk_cnt
* sizeof(Elf64_Phdr
);
551 hdr
= kzalloc_panic(alloc_size
);
552 /* Init elf header */
553 ptr
= ehdr_init(hdr
, mem_chunk_cnt
);
554 /* Init program headers */
556 ptr
= PTR_ADD(ptr
, sizeof(Elf64_Phdr
));
558 ptr
= PTR_ADD(ptr
, sizeof(Elf64_Phdr
) * mem_chunk_cnt
);
560 hdr_off
= PTR_DIFF(ptr
, hdr
);
561 ptr
= notes_init(phdr_notes
, ptr
, ((unsigned long) hdr
) + hdr_off
);
563 hdr_off
= PTR_DIFF(ptr
, hdr
);
564 loads_init(phdr_loads
, hdr_off
);
565 *addr
= (unsigned long long) hdr
;
566 *size
= (unsigned long long) hdr_off
;
567 BUG_ON(elfcorehdr_size
> alloc_size
);
/*
 * Free ELF core header (new kernel)
 */
void elfcorehdr_free(unsigned long long addr)
{
	kfree((void *)(unsigned long)addr);
}
580 * Read from ELF header
582 ssize_t
elfcorehdr_read(char *buf
, size_t count
, u64
*ppos
)
584 void *src
= (void *)(unsigned long)*ppos
;
586 memcpy(buf
, src
, count
);
592 * Read from ELF notes data
594 ssize_t
elfcorehdr_read_notes(char *buf
, size_t count
, u64
*ppos
)
596 void *src
= (void *)(unsigned long)*ppos
;
598 memcpy(buf
, src
, count
);