/*
 * S390 kdump implementation
 *
 * Copyright IBM Corp. 2011
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/elf.h>
#include <asm/asm-offsets.h>
#include <linux/memblock.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#include <asm/sclp.h>

#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))

static struct memblock_region oldmem_region;

static struct memblock_type oldmem_type = {
	.cnt = 1,
	.max = 1,
	.total_size = 0,
	.regions = &oldmem_region,
};

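/*
 * Editorial note (not part of the original file): oldmem_type describes a
 * single region (the crashkernel area filled in by elfcorehdr_alloc() below).
 * Passing it as the "exclude" type to for_each_mem_range() lets
 * get_mem_chunk_cnt() and loads_init() walk memblock.physmem of the dumped
 * system while skipping that region, roughly:
 *
 *	for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
 *			   MEMBLOCK_NONE, &start, &end, NULL)
 *		... one ELF PT_LOAD chunk per returned [start, end) ...
 */
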
/*
 * Per-CPU register state collected for the dump and written out as ELF notes
 */
struct save_area {
	struct list_head list;
	u64 psw[2];
	u64 ctrs[16];
	u64 gprs[16];
	u32 acrs[16];
	u64 fprs[16];
	u32 fpc;
	u32 prefix;
	u64 todpreg;
	u64 timer;
	u64 todcmp;
	u64 vxrs_low[16];
	__vector128 vxrs_high[16];
};

static LIST_HEAD(dump_save_areas);

/*
 * Allocate a save area
 */
struct save_area * __init save_area_alloc(bool is_boot_cpu)
{
	struct save_area *sa;

	sa = (void *) memblock_alloc(sizeof(*sa), 8);
	if (is_boot_cpu)
		list_add(&sa->list, &dump_save_areas);
	else
		list_add_tail(&sa->list, &dump_save_areas);
	return sa;
}

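/*
 * Editorial note (not part of the original file): the boot CPU is put at the
 * head of dump_save_areas while all other CPUs are appended to the tail, so
 * save_area_boot_cpu() below can simply return the first list entry. A
 * hypothetical caller would look like:
 *
 *	struct save_area *sa = save_area_alloc(true);
 *	save_area_add_regs(sa, regs);
 */
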
/*
 * Return the address of the save area for the boot CPU
 */
struct save_area * __init save_area_boot_cpu(void)
{
	return list_first_entry_or_null(&dump_save_areas, struct save_area, list);
}

/*
 * Copy CPU registers into the save area
 */
void __init save_area_add_regs(struct save_area *sa, void *regs)
{
	struct lowcore *lc;

	lc = (struct lowcore *)(regs - __LC_FPREGS_SAVE_AREA);
	memcpy(&sa->psw, &lc->psw_save_area, sizeof(sa->psw));
	memcpy(&sa->ctrs, &lc->cregs_save_area, sizeof(sa->ctrs));
	memcpy(&sa->gprs, &lc->gpregs_save_area, sizeof(sa->gprs));
	memcpy(&sa->acrs, &lc->access_regs_save_area, sizeof(sa->acrs));
	memcpy(&sa->fprs, &lc->floating_pt_save_area, sizeof(sa->fprs));
	memcpy(&sa->fpc, &lc->fpt_creg_save_area, sizeof(sa->fpc));
	memcpy(&sa->prefix, &lc->prefixreg_save_area, sizeof(sa->prefix));
	memcpy(&sa->todpreg, &lc->tod_progreg_save_area, sizeof(sa->todpreg));
	memcpy(&sa->timer, &lc->cpu_timer_save_area, sizeof(sa->timer));
	memcpy(&sa->todcmp, &lc->clock_comp_save_area, sizeof(sa->todcmp));
}

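/*
 * Illustrative note (not part of the original file): regs points at the
 * floating point register save area within the old CPU's lowcore image, so
 * subtracting __LC_FPREGS_SAVE_AREA recovers the lowcore base:
 *
 *	regs == lowcore_base + __LC_FPREGS_SAVE_AREA
 *	lc   == (struct lowcore *)(regs - __LC_FPREGS_SAVE_AREA)
 *
 * from which the individual hardware save areas (psw_save_area,
 * cregs_save_area, ...) are copied field by field above.
 */
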
/*
 * Copy vector registers into the save area
 */
void __init save_area_add_vxrs(struct save_area *sa, __vector128 *vxrs)
{
	int i;

	/* Copy lower halves of vector registers 0-15 */
	for (i = 0; i < 16; i++)
		memcpy(&sa->vxrs_low[i], &vxrs[i].u[2], 8);
	/* Copy vector registers 16-31 */
	memcpy(sa->vxrs_high, vxrs + 16, 16 * sizeof(__vector128));
}

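/*
 * Editorial note (not part of the original file): V0-V15 share their leftmost
 * 64 bits with the floating point registers, which are already captured via
 * floating_pt_save_area in save_area_add_regs(). Hence only the rightmost
 * halves (&vxrs[i].u[2], 8 bytes each) are kept in vxrs_low for the
 * NT_S390_VXRS_LOW note, while V16-V31 are stored complete in vxrs_high for
 * the NT_S390_VXRS_HIGH note.
 */
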
/*
 * Return physical address for virtual address
 */
static inline void *load_real_addr(void *addr)
{
	unsigned long real_addr;

	asm volatile(
		   "	lra	%0,0(%1)\n"
		   "	jz	0f\n"
		   "	la	%0,0\n"
		   "0:"
		   : "=a" (real_addr) : "a" (addr) : "cc");
	return (void *)real_addr;
}

/*
 * Copy memory of the old, dumped system to a kernel space virtual address
 */
int copy_oldmem_kernel(void *dst, void *src, size_t count)
{
	unsigned long from, len;
	void *ra;
	int rc;

	while (count) {
		from = __pa(src);
		if (!OLDMEM_BASE && from < sclp.hsa_size) {
			/* Copy from zfcpdump HSA area */
			len = min(count, sclp.hsa_size - from);
			rc = memcpy_hsa_kernel(dst, from, len);
			if (rc)
				return rc;
		} else {
			/* Check for swapped kdump oldmem areas */
			if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
				from -= OLDMEM_BASE;
				len = min(count, OLDMEM_SIZE - from);
			} else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
				len = min(count, OLDMEM_SIZE - from);
				from += OLDMEM_BASE;
			} else {
				len = count;
			}
			if (is_vmalloc_or_module_addr(dst)) {
				ra = load_real_addr(dst);
				len = min(PAGE_SIZE - offset_in_page(ra), len);
			} else {
				ra = dst;
			}
			if (memcpy_real(ra, (void *) from, len))
				return -EFAULT;
		}
		dst += len;
		src += len;
		count -= len;
	}
	return 0;
}

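/*
 * Editorial sketch (not part of the original file): copy_oldmem_kernel()
 * resolves three possible sources for an old-memory address:
 *  - zfcpdump: below sclp.hsa_size, fetched through memcpy_hsa_kernel(),
 *  - kdump:    [0, OLDMEM_SIZE) and [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE)
 *              are swapped before memcpy_real(),
 *  - anything else is copied 1:1 with memcpy_real().
 * A typical in-file caller is get_vmcoreinfo_old(), e.g.:
 *
 *	if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
 *		return NULL;
 */
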
/*
 * Copy memory of the old, dumped system to a user space virtual address
 */
static int copy_oldmem_user(void __user *dst, void *src, size_t count)
{
	unsigned long from, len;
	int rc;

	while (count) {
		from = __pa(src);
		if (!OLDMEM_BASE && from < sclp.hsa_size) {
			/* Copy from zfcpdump HSA area */
			len = min(count, sclp.hsa_size - from);
			rc = memcpy_hsa_user(dst, from, len);
			if (rc)
				return rc;
		} else {
			/* Check for swapped kdump oldmem areas */
			if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
				from -= OLDMEM_BASE;
				len = min(count, OLDMEM_SIZE - from);
			} else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
				len = min(count, OLDMEM_SIZE - from);
				from += OLDMEM_BASE;
			} else {
				len = count;
			}
			rc = copy_to_user_real(dst, (void *) from, count);
			if (rc)
				return rc;
		}
		dst += len;
		src += len;
		count -= len;
	}
	return 0;
}

/*
 * Copy one page from "oldmem"
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
			 unsigned long offset, int userbuf)
{
	void *src;
	int rc;

	if (!csize)
		return 0;
	src = (void *) (pfn << PAGE_SHIFT) + offset;
	if (userbuf)
		rc = copy_oldmem_user((void __force __user *) buf, src, csize);
	else
		rc = copy_oldmem_kernel((void *) buf, src, csize);
	return rc;
}

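/*
 * Usage sketch (editorial, not part of the original file): copy_oldmem_page()
 * is the architecture hook used by the generic /proc/vmcore read path, which
 * walks the dump in page-sized pieces, roughly:
 *
 *	nr = min(count, PAGE_SIZE - offset);
 *	tmp = copy_oldmem_page(pfn, buf, nr, offset, userbuf);
 *
 * with pfn/offset derived from the file position (see fs/proc/vmcore.c).
 */
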
/*
 * Remap "oldmem" for kdump
 *
 * For the kdump reserved memory this function performs a swap operation:
 * [0, OLDMEM_SIZE) is mapped to [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE)
 */
static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
					unsigned long from, unsigned long pfn,
					unsigned long size, pgprot_t prot)
{
	unsigned long size_old;
	int rc;

	if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
		size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
		rc = remap_pfn_range(vma, from,
				     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
				     size_old, prot);
		if (rc || size == size_old)
			return rc;
		size -= size_old;
		from += size_old;
		pfn += size_old >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}

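/*
 * Worked example (editorial, not part of the original file), using made-up
 * values OLDMEM_BASE = 0x20000000 and OLDMEM_SIZE = 0x10000000: a request to
 * map old-memory pfn 0x100 (below OLDMEM_SIZE >> PAGE_SHIFT) is backed by
 * pfn 0x100 + (OLDMEM_BASE >> PAGE_SHIFT), i.e. the crashkernel region,
 * while pfns from OLDMEM_SIZE >> PAGE_SHIFT upwards fall through to the
 * plain remap_pfn_range() at the end. This mirrors the address swap done in
 * copy_oldmem_kernel()/copy_oldmem_user().
 */
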
/*
 * Remap "oldmem" for zfcpdump
 *
 * We only map available memory above HSA size. Memory below HSA size
 * is read on demand using the copy_oldmem_page() function.
 */
static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
					   unsigned long from,
					   unsigned long pfn,
					   unsigned long size, pgprot_t prot)
{
	unsigned long hsa_end = sclp.hsa_size;
	unsigned long size_hsa;

	if (pfn < hsa_end >> PAGE_SHIFT) {
		size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
		if (size == size_hsa)
			return 0;
		size -= size_hsa;
		from += size_hsa;
		pfn += size_hsa >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for kdump or zfcpdump
 */
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (OLDMEM_BASE)
		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
						       prot);
}

/*
 * Alloc memory and panic in case of ENOMEM
 */
static void *kzalloc_panic(int len)
{
	void *rc;

	rc = kzalloc(len, GFP_KERNEL);
	if (!rc)
		panic("s390 kdump kzalloc (%d) failed", len);
	return rc;
}

/*
 * Initialize ELF note
 */
static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
			  const char *name)
{
	Elf64_Nhdr *note;
	u64 len;

	note = (Elf64_Nhdr *)buf;
	note->n_namesz = strlen(name) + 1;
	note->n_descsz = d_len;
	note->n_type = type;
	len = sizeof(Elf64_Nhdr);

	memcpy(buf + len, name, note->n_namesz);
	len = roundup(len + note->n_namesz, 4);

	memcpy(buf + len, desc, note->n_descsz);
	len = roundup(len + note->n_descsz, 4);

	return PTR_ADD(buf, len);
}

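/*
 * Layout sketch (editorial, not part of the original file): a note emitted by
 * nt_init_name() occupies the buffer as follows, with the name and the
 * descriptor each padded to a 4-byte boundary:
 *
 *	+------------------+  buf
 *	| Elf64_Nhdr       |  n_namesz, n_descsz, n_type
 *	+------------------+  buf + sizeof(Elf64_Nhdr)
 *	| name, '\0', pad  |
 *	+------------------+  roundup(... + n_namesz, 4)
 *	| desc, pad        |
 *	+------------------+  roundup(... + n_descsz, 4)  == return value
 */
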
static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
{
	const char *note_name = "LINUX";

	if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
		note_name = KEXEC_CORE_NOTE_NAME;
	return nt_init_name(buf, type, desc, d_len, note_name);
}

/*
 * Fill ELF notes for one CPU with save area registers
 */
static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
{
	struct elf_prstatus nt_prstatus;
	elf_fpregset_t nt_fpregset;

	/* Prepare prstatus note */
	memset(&nt_prstatus, 0, sizeof(nt_prstatus));
	memcpy(&nt_prstatus.pr_reg.gprs, sa->gprs, sizeof(sa->gprs));
	memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
	memcpy(&nt_prstatus.pr_reg.acrs, sa->acrs, sizeof(sa->acrs));
	nt_prstatus.pr_pid = cpu;
	/* Prepare fpregset (floating point) note */
	memset(&nt_fpregset, 0, sizeof(nt_fpregset));
	memcpy(&nt_fpregset.fpc, &sa->fpc, sizeof(sa->fpc));
	memcpy(&nt_fpregset.fprs, &sa->fprs, sizeof(sa->fprs));
	/* Create ELF notes for the CPU */
	ptr = nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus));
	ptr = nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset));
	ptr = nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer));
	ptr = nt_init(ptr, NT_S390_TODCMP, &sa->todcmp, sizeof(sa->todcmp));
	ptr = nt_init(ptr, NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg));
	ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs));
	ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix));
	if (MACHINE_HAS_VX) {
		ptr = nt_init(ptr, NT_S390_VXRS_HIGH,
			      &sa->vxrs_high, sizeof(sa->vxrs_high));
		ptr = nt_init(ptr, NT_S390_VXRS_LOW,
			      &sa->vxrs_low, sizeof(sa->vxrs_low));
	}
	return ptr;
}

/*
 * Initialize prpsinfo note (new kernel)
 */
static void *nt_prpsinfo(void *ptr)
{
	struct elf_prpsinfo prpsinfo;

	memset(&prpsinfo, 0, sizeof(prpsinfo));
	prpsinfo.pr_sname = 'R';
	strcpy(prpsinfo.pr_fname, "vmlinux");
	return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo));
}

/*
 * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
 */
static void *get_vmcoreinfo_old(unsigned long *size)
{
	char nt_name[11], *vmcoreinfo;
	Elf64_Nhdr note;
	void *addr;

	if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
		return NULL;
	memset(nt_name, 0, sizeof(nt_name));
	if (copy_oldmem_kernel(&note, addr, sizeof(note)))
		return NULL;
	if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
			       sizeof(nt_name) - 1))
		return NULL;
	if (strcmp(nt_name, "VMCOREINFO") != 0)
		return NULL;
	vmcoreinfo = kzalloc_panic(note.n_descsz);
	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
		return NULL;
	*size = note.n_descsz;
	return vmcoreinfo;
}

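/*
 * Editorial note (not part of the original file): the descriptor of the old
 * kernel's VMCOREINFO note starts 24 bytes into the note buffer because
 * sizeof(Elf64_Nhdr) == 12 and the name "VMCOREINFO" plus its terminating
 * '\0' (11 bytes) is padded to the next 4-byte boundary (12 bytes), hence
 * the "addr + 24" above.
 */
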
/*
 * Initialize vmcoreinfo note (new kernel)
 */
static void *nt_vmcoreinfo(void *ptr)
{
	unsigned long size;
	void *vmcoreinfo;

	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
	if (!vmcoreinfo)
		vmcoreinfo = get_vmcoreinfo_old(&size);
	if (!vmcoreinfo)
		return ptr;
	return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}

/*
 * Initialize final note (needed for /proc/vmcore code)
 */
static void *nt_final(void *ptr)
{
	Elf64_Nhdr *note;

	note = (Elf64_Nhdr *) ptr;
	note->n_namesz = 0;
	note->n_descsz = 0;
	note->n_type = 0;
	return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
}

/*
 * Initialize ELF header (new kernel)
 */
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
	memset(ehdr, 0, sizeof(*ehdr));
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = EM_S390;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);
	ehdr->e_phnum = mem_chunk_cnt + 1;
	return ehdr + 1;
}

/*
 * Return CPU count for ELF header (new kernel)
 */
static int get_cpu_cnt(void)
{
	struct save_area *sa;
	int cpus = 0;

	list_for_each_entry(sa, &dump_save_areas, list)
		cpus++;
	return cpus;
}

/*
 * Return memory chunk count for ELF header (new kernel)
 */
static int get_mem_chunk_cnt(void)
{
	int cnt = 0;
	u64 idx;

	for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
			   MEMBLOCK_NONE, NULL, NULL, NULL)
		cnt++;
	return cnt;
}

/*
 * Initialize ELF loads (new kernel)
 */
static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
	phys_addr_t start, end;
	u64 idx;

	for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
			   MEMBLOCK_NONE, &start, &end, NULL) {
		phdr->p_filesz = end - start;
		phdr->p_type = PT_LOAD;
		phdr->p_offset = start;
		phdr->p_vaddr = start;
		phdr->p_paddr = start;
		phdr->p_memsz = end - start;
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_align = PAGE_SIZE;
		phdr++;
	}
}

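/*
 * Editorial note (not part of the original file): the PT_LOAD entries
 * describe the old system's memory 1:1 (p_paddr == p_vaddr == start,
 * p_memsz == p_filesz == end - start), with the crashkernel region excluded
 * via oldmem_type. loads_offset is not used here; the generic /proc/vmcore
 * code lays out the file offsets itself based on these headers.
 */
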
/*
 * Initialize notes (new kernel)
 */
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
	struct save_area *sa;
	void *ptr_start = ptr;
	int cpu;

	ptr = nt_prpsinfo(ptr);

	cpu = 1;
	list_for_each_entry(sa, &dump_save_areas, list)
		ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
	ptr = nt_vmcoreinfo(ptr);
	ptr = nt_final(ptr);
	memset(phdr, 0, sizeof(*phdr));
	phdr->p_type = PT_NOTE;
	phdr->p_offset = notes_offset;
	phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
	phdr->p_memsz = phdr->p_filesz;
	return ptr;
}

/*
 * Create ELF core header (new kernel)
 */
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	Elf64_Phdr *phdr_notes, *phdr_loads;
	int mem_chunk_cnt;
	void *ptr, *hdr;
	u32 alloc_size;
	u64 hdr_off;

	/* If we are not in kdump or zfcpdump mode return */
	if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
		return 0;
	/* If we cannot get HSA size for zfcpdump return error */
	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp.hsa_size)
		return -ENODEV;

	/* For kdump, exclude previous crashkernel memory */
	if (OLDMEM_BASE) {
		oldmem_region.base = OLDMEM_BASE;
		oldmem_region.size = OLDMEM_SIZE;
		oldmem_type.total_size = OLDMEM_SIZE;
	}

	mem_chunk_cnt = get_mem_chunk_cnt();

	alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
		mem_chunk_cnt * sizeof(Elf64_Phdr);
	hdr = kzalloc_panic(alloc_size);
	/* Init elf header */
	ptr = ehdr_init(hdr, mem_chunk_cnt);
	/* Init program headers */
	phdr_notes = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
	phdr_loads = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
	/* Init notes */
	hdr_off = PTR_DIFF(ptr, hdr);
	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
	/* Init loads */
	hdr_off = PTR_DIFF(ptr, hdr);
	loads_init(phdr_loads, hdr_off);
	*addr = (unsigned long long) hdr;
	*size = (unsigned long long) hdr_off;
	BUG_ON(elfcorehdr_size > alloc_size);
	return 0;
}

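/*
 * Layout sketch (editorial, not part of the original file): the buffer
 * returned through *addr/*size is assembled as
 *
 *	[Elf64_Ehdr][PT_NOTE phdr][PT_LOAD phdrs x mem_chunk_cnt][note data]
 *
 * alloc_size budgets 0x1000 bytes for the fixed part plus roughly 0x4a0
 * bytes of note data per CPU and one Elf64_Phdr per memory chunk; the
 * BUG_ON() above is meant as a sanity check of that estimate.
 */
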
/*
 * Free ELF core header (new kernel)
 */
void elfcorehdr_free(unsigned long long addr)
{
	kfree((void *)(unsigned long)addr);
}

/*
 * Read from ELF header
 */
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	void *src = (void *)(unsigned long)*ppos;

	memcpy(buf, src, count);
	*ppos += count;
	return count;
}

/*
 * Read from ELF notes data
 */
ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	void *src = (void *)(unsigned long)*ppos;

	memcpy(buf, src, count);
	*ppos += count;
	return count;
}