Linux 2.6.34-rc3
[pohmelfs.git] / drivers / s390 / char / zcore.c
blob: 3166d85914f289aa41e34c7b31fcb067e284dd5b
/*
 * zcore module to export memory content and register sets for creating system
 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
 * dump format as s390 standalone dumps.
 *
 * For more information please refer to Documentation/s390/zfcpdump.txt
 *
 * Copyright IBM Corp. 2003,2008
 * Author(s): Michael Holzheu
 */

#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)

#define TO_USER		0
#define TO_KERNEL	1
#define CHUNK_INFO_SIZE	34 /* 2 16-char hex numbers, each followed by a blank */
enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};

/* dump system info */

struct sys_info {
	enum arch_id	 arch;
	unsigned long	 sa_base;
	u32		 sa_size;
	int		 cpu_map[NR_CPUS];
	unsigned long	 mem_size;
	struct save_area lc_mask;
};

struct ipib_info {
	unsigned long	ipib;
	u32		checksum;
} __attribute__((packed));

static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct ipl_parameter_block *ipl_block;
/*
 * Copy memory from HSA to kernel or user memory (not reentrant):
 *
 * @dest:  Kernel or user buffer where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Size of buffer, which should be copied
 * @mode:  Either TO_KERNEL or TO_USER
 */
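/*
 * Note: the HSA is read through SCLP SDIAS in PAGE_SIZE blocks, so unaligned
 * start and end addresses are staged through the static bounce buffer below,
 * which is one reason this helper is not reentrant.
 */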
static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
	int offs, blk_num;
	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

	if (count == 0)
		return 0;

	/* copy first block */
	offs = 0;
	if ((src % PAGE_SIZE) != 0) {
		blk_num = src / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest,
					 buf + (src % PAGE_SIZE), offs))
				return -EFAULT;
		} else
			memcpy(dest, buf + (src % PAGE_SIZE), offs);
	}
	if (offs == count)
		goto out;

	/* copy middle */
	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
		blk_num = (src + offs) / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest + offs,
					 buf, PAGE_SIZE))
				return -EFAULT;
		} else
			memcpy(dest + offs, buf, PAGE_SIZE);
	}
	if (offs == count)
		goto out;

	/* copy last block */
	blk_num = (src + offs) / PAGE_SIZE + 2;
	if (sclp_sdias_copy(buf, blk_num, 1)) {
		TRACE("sclp_sdias_copy() failed\n");
		return -EIO;
	}
	if (mode == TO_USER) {
		if (copy_to_user((__force __user void*) dest + offs, buf,
				 count - offs))
			return -EFAULT;
	} else
		memcpy(dest + offs, buf, count - offs);
out:
	return 0;
}
static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}

static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
	return memcpy_hsa(dest, src, count, TO_KERNEL);
}
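/*
 * Copy real memory to user space in chunks: memcpy_real() stages the data in
 * a static kernel bounce buffer, and copy_to_user() then moves it out, so
 * real memory is never written to user space directly.
 */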
static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
{
	static char buf[4096];
	int offs = 0, size;

	while (offs < count) {
		size = min(sizeof(buf), count - offs);
		if (memcpy_real(buf, (void *) src + offs, size))
			return -EFAULT;
		if (copy_to_user(dest + offs, buf, size))
			return -EFAULT;
		offs += size;
	}
	return 0;
}

static int __init init_cpu_info(enum arch_id arch)
{
	struct save_area *sa;

	/* get info for boot cpu from lowcore, stored in the HSA */

	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
	if (!sa)
		return -ENOMEM;
	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
		TRACE("could not copy from HSA\n");
		kfree(sa);
		return -EIO;
	}
	zfcpdump_save_areas[0] = sa;
	return 0;
}
static DEFINE_MUTEX(zcore_mutex);

#define DUMP_VERSION	0x5
#define DUMP_MAGIC	0xa8190173618f23fdULL
#define DUMP_ARCH_S390X	2
#define DUMP_ARCH_S390	1
#define HEADER_SIZE	4096
/* dump header, written according to the s390 crash dump format */
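/*
 * Note: the end_pad arrays pad the structure so that mvdump_sign and lc_vec
 * land at the fixed offsets 0x200 and 0x800; the whole header (HEADER_SIZE
 * bytes) is returned as the first 4K of the "zcore/mem" file.
 */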
struct zcore_header {
	u64 magic;
	u32 version;
	u32 header_size;
	u32 dump_level;
	u32 page_size;
	u64 mem_size;
	u64 mem_start;
	u64 mem_end;
	u32 num_pages;
	u32 pad1;
	u64 tod;
	struct cpuid cpu_id;
	u32 arch_id;
	u32 volnr;
	u32 build_arch;
	u64 rmem_size;
	u8 mvdump;
	u16 cpu_cnt;
	u16 real_cpu_cnt;
	u8 end_pad1[0x200-0x061];
	u64 mvdump_sign;
	u64 mvdump_zipl_time;
	u8 end_pad2[0x800-0x210];
	u32 lc_vec[512];
} __attribute__((packed,__aligned__(16)));

static struct zcore_header zcore_header = {
	.magic		= DUMP_MAGIC,
	.version	= DUMP_VERSION,
	.header_size	= 4096,
	.dump_level	= 0,
	.page_size	= PAGE_SIZE,
	.mem_start	= 0,
#ifdef CONFIG_64BIT
	.build_arch	= DUMP_ARCH_S390X,
#else
	.build_arch	= DUMP_ARCH_S390,
#endif
};
/*
 * Copy lowcore info to buffer. Use map in order to copy only register parts.
 *
 * @buf:    User buffer
 * @sa:     Pointer to save area
 * @sa_off: Offset in save area to copy
 * @len:    Number of bytes to copy
 */
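/*
 * Note: sys_info.lc_mask acts as a byte-wise mask over the save area (set up
 * by set_lc_mask() below); only bytes whose mask byte is non-zero are copied,
 * so that just the register fields end up in the dump.
 */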
static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
{
	int i;
	char *lc_mask = (char*)&sys_info.lc_mask;

	for (i = 0; i < len; i++) {
		if (!lc_mask[i + sa_off])
			continue;
		if (copy_to_user(buf + i, sa + sa_off + i, 1))
			return -EFAULT;
	}
	return 0;
}
/*
 * Copy lowcore register save areas into the read buffer, if necessary
 *
 * @buf:   User buffer
 * @start: Start address of buffer in dump memory
 * @count: Size of buffer
 */
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
	unsigned long end;
	int i = 0;

	if (count == 0)
		return 0;

	end = start + count;
	while (zfcpdump_save_areas[i]) {
		unsigned long cp_start, cp_end;	/* copy range */
		unsigned long sa_start, sa_end;	/* save area range */
		unsigned long prefix;
		unsigned long sa_off, len, buf_off;

		prefix = zfcpdump_save_areas[i]->pref_reg;
		sa_start = prefix + sys_info.sa_base;
		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;

		if ((end < sa_start) || (start > sa_end))
			goto next;
		cp_start = max(start, sa_start);
		cp_end = min(end, sa_end);

		buf_off = cp_start - start;
		sa_off = cp_start - sa_start;
		len = cp_end - cp_start;

		TRACE("copy_lc for: %lx\n", start);
		if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
			return -EFAULT;
next:
		i++;
	}
	return 0;
}
/*
 * Read routine for the zcore character device
 * First 4K is the dump header
 * Next 32MB is HSA memory
 * The rest is read from absolute memory
 */
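/*
 * Example (user space; assuming debugfs is mounted at /sys/kernel/debug, and
 * with an illustrative output path):
 *
 *   dd if=/sys/kernel/debug/zcore/mem of=/mnt/dumpfile bs=1M
 *
 * copies the complete dump (header, HSA contents, then real memory) to a
 * file, in the same layout this routine produces.
 */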
static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long mem_start; /* Start address in memory */
	size_t mem_offs;	 /* Offset in dump memory */
	size_t hdr_count;	 /* Size of header part of output buffer */
	size_t size;
	int rc;

	mutex_lock(&zcore_mutex);

	if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
		rc = -EINVAL;
		goto fail;
	}

	count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));

	/* Copy dump header */
	if (*ppos < HEADER_SIZE) {
		size = min(count, (size_t) (HEADER_SIZE - *ppos));
		if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
			rc = -EFAULT;
			goto fail;
		}
		hdr_count = size;
		mem_start = 0;
	} else {
		hdr_count = 0;
		mem_start = *ppos - HEADER_SIZE;
	}

	mem_offs = 0;

	/* Copy from HSA data */
	if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
		size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
			   - mem_start));
		rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
		if (rc)
			goto fail;

		mem_offs += size;
	}

	/* Copy from real mem */
	size = count - mem_offs - hdr_count;
	rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
			      size);
	if (rc)
		goto fail;

	/*
	 * Since s390 dump analysis tools like lcrash or crash
	 * expect register sets in the prefix pages of the cpus,
	 * we copy them into the read buffer, if necessary.
	 * buf + hdr_count: Start of memory part of output buffer
	 * mem_start: Start memory address to copy from
	 * count - hdr_count: Size of memory area to copy
	 */
	if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
		rc = -EFAULT;
		goto fail;
	}
	*ppos += count;
fail:
	mutex_unlock(&zcore_mutex);
	return (rc < 0) ? rc : count;
}
static int zcore_open(struct inode *inode, struct file *filp)
{
	if (!hsa_available)
		return -ENODATA;
	else
		return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
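/*
 * Note: closing the file releases the HSA (DIAG308_REL_HSA) and clears
 * hsa_available, so any later open of "zcore/mem" fails with -ENODATA.
 */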
static int zcore_release(struct inode *inode, struct file *filep)
{
	diag308(DIAG308_REL_HSA, NULL);
	hsa_available = 0;
	return 0;
}
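/* Only SEEK_SET (0) and SEEK_CUR (1) are supported; anything else is -EINVAL. */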
static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t rc;

	mutex_lock(&zcore_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		rc = file->f_pos;
		break;
	case 1:
		file->f_pos += offset;
		rc = file->f_pos;
		break;
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&zcore_mutex);
	return rc;
}

static const struct file_operations zcore_fops = {
	.owner		= THIS_MODULE,
	.llseek		= zcore_lseek,
	.read		= zcore_read,
	.open		= zcore_open,
	.release	= zcore_release,
};
static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
				       MEMORY_CHUNKS * CHUNK_INFO_SIZE);
}
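/*
 * Build the "zcore/memmap" contents on open: one "<addr> <size> " pair of
 * 16-digit hex numbers (CHUNK_INFO_SIZE bytes) per detected memory chunk,
 * stopping at the first chunk of size 0.
 */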
static int zcore_memmap_open(struct inode *inode, struct file *filp)
{
	int i;
	char *buf;
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
			      GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;
	detect_memory_layout(chunk_array);
	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
	if (!buf) {
		kfree(chunk_array);
		return -ENOMEM;
	}
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
			(unsigned long long) chunk_array[i].addr,
			(unsigned long long) chunk_array[i].size);
		if (chunk_array[i].size == 0)
			break;
	}
	kfree(chunk_array);
	filp->private_data = buf;
	return 0;
}

static int zcore_memmap_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static const struct file_operations zcore_memmap_fops = {
	.owner		= THIS_MODULE,
	.read		= zcore_memmap_read,
	.open		= zcore_memmap_open,
	.release	= zcore_memmap_release,
};
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	if (ipl_block) {
		diag308(DIAG308_SET, ipl_block);
		diag308(DIAG308_IPL, NULL);
	}
	return count;
}

static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
	return 0;
}

static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations zcore_reipl_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_reipl_write,
	.open		= zcore_reipl_open,
	.release	= zcore_reipl_release,
};
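/*
 * The lc_mask save area is used as a byte mask by copy_lc(): every byte of a
 * register field that should appear in the dump is set to 0xff, everything
 * else stays 0. The 31-bit and 64-bit save area layouts differ, hence the
 * two variants below.
 */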
#ifdef CONFIG_32BIT

static void __init set_lc_mask(struct save_area *map)
{
	memset(&map->ext_save, 0xff, sizeof(map->ext_save));
	memset(&map->timer, 0xff, sizeof(map->timer));
	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
	memset(&map->psw, 0xff, sizeof(map->psw));
	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}

#else /* CONFIG_32BIT */

static void __init set_lc_mask(struct save_area *map)
{
	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
	memset(&map->psw, 0xff, sizeof(map->psw));
	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
	memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
	memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
	memset(&map->timer, 0xff, sizeof(map->timer));
	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}

#endif /* CONFIG_32BIT */
/*
 * Initialize dump globals for a given architecture
 */
static int __init sys_info_init(enum arch_id arch)
{
	int rc;

	switch (arch) {
	case ARCH_S390X:
		pr_alert("DETECTED 'S390X (64 bit) OS'\n");
		break;
	case ARCH_S390:
		pr_alert("DETECTED 'S390 (32 bit) OS'\n");
		break;
	default:
		pr_alert("0x%x is an unknown architecture.\n", arch);
		return -EINVAL;
	}
	sys_info.sa_base = SAVE_AREA_BASE;
	sys_info.sa_size = sizeof(struct save_area);
	sys_info.arch = arch;
	set_lc_mask(&sys_info.lc_mask);
	rc = init_cpu_info(arch);
	if (rc)
		return rc;
	sys_info.mem_size = real_memory_size;

	return 0;
}
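/*
 * Check that the HSA, as reported by sclp_sdias_blk_count(), is large enough
 * to hold the expected ZFCPDUMP_HSA_SIZE bytes of saved memory.
 */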
static int __init check_sdias(void)
{
	int rc, act_hsa_size;

	rc = sclp_sdias_blk_count();
	if (rc < 0) {
		TRACE("Could not determine HSA size\n");
		return rc;
	}
	act_hsa_size = (rc - 1) * PAGE_SIZE;
	if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
		TRACE("HSA size too small: %i\n", act_hsa_size);
		return -EINVAL;
	}
	return 0;
}

static int __init get_mem_size(unsigned long *mem)
{
	int i;
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
			      GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;
	detect_memory_layout(chunk_array);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk_array[i].size == 0)
			break;
		*mem += chunk_array[i].size;
	}
	kfree(chunk_array);
	return 0;
}
static int __init zcore_header_init(int arch, struct zcore_header *hdr)
{
	int rc, i;
	unsigned long memory = 0;
	u32 prefix;

	if (arch == ARCH_S390X)
		hdr->arch_id = DUMP_ARCH_S390X;
	else
		hdr->arch_id = DUMP_ARCH_S390;
	rc = get_mem_size(&memory);
	if (rc)
		return rc;
	hdr->mem_size = memory;
	hdr->rmem_size = memory;
	hdr->mem_end = sys_info.mem_size;
	hdr->num_pages = memory / PAGE_SIZE;
	hdr->tod = get_clock();
	get_cpu_id(&hdr->cpu_id);
	for (i = 0; zfcpdump_save_areas[i]; i++) {
		prefix = zfcpdump_save_areas[i]->pref_reg;
		hdr->real_cpu_cnt++;
		if (!prefix)
			continue;
		hdr->lc_vec[hdr->cpu_cnt] = prefix;
		hdr->cpu_cnt++;
	}
	return 0;
}
/*
 * Provide IPL parameter information block from either HSA or memory
 * for future reipl
 */
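/*
 * Note: the ipib address and checksum are read from the lowcore save area in
 * the HSA (__LC_DUMP_REIPL); if the stored checksum does not match the block
 * that is read back, ipl_block is discarded and writing "zcore/reipl" becomes
 * a no-op.
 */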
static int __init zcore_reipl_init(void)
{
	struct ipib_info ipib_info;
	int rc;

	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
	if (rc)
		return rc;
	if (ipib_info.ipib == 0)
		return 0;
	ipl_block = (void *) __get_free_page(GFP_KERNEL);
	if (!ipl_block)
		return -ENOMEM;
	if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
		rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
	else
		rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
	if (rc) {
		free_page((unsigned long) ipl_block);
		return rc;
	}
	if (csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
	    ipib_info.checksum) {
		TRACE("Checksum does not match\n");
		free_page((unsigned long) ipl_block);
		ipl_block = NULL;
	}
	return 0;
}
static int __init zcore_init(void)
{
	unsigned char arch;
	int rc;

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return -ENODATA;

	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
	debug_register_view(zcore_dbf, &debug_sprintf_view);
	debug_set_level(zcore_dbf, 6);

	TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
	TRACE("wwpn:  %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
	TRACE("lun:   %llx\n", (unsigned long long) ipl_info.data.fcp.lun);

	rc = sclp_sdias_init();
	if (rc)
		goto fail;

	rc = check_sdias();
	if (rc)
		goto fail;

	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
	if (rc)
		goto fail;

#ifdef CONFIG_64BIT
	if (arch == ARCH_S390) {
		pr_alert("The 64-bit dump tool cannot be used for a "
			 "32-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#else /* CONFIG_64BIT */
	if (arch == ARCH_S390X) {
		pr_alert("The 32-bit dump tool cannot be used for a "
			 "64-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#endif /* CONFIG_64BIT */

	rc = sys_info_init(arch);
	if (rc)
		goto fail;

	rc = zcore_header_init(arch, &zcore_header);
	if (rc)
		goto fail;

	rc = zcore_reipl_init();
	if (rc)
		goto fail;

	zcore_dir = debugfs_create_dir("zcore", NULL);
	if (!zcore_dir) {
		rc = -ENOMEM;
		goto fail;
	}
	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
					 &zcore_fops);
	if (!zcore_file) {
		rc = -ENOMEM;
		goto fail_dir;
	}
	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
						NULL, &zcore_memmap_fops);
	if (!zcore_memmap_file) {
		rc = -ENOMEM;
		goto fail_file;
	}
	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
					       NULL, &zcore_reipl_fops);
	if (!zcore_reipl_file) {
		rc = -ENOMEM;
		goto fail_memmap_file;
	}
	hsa_available = 1;
	return 0;

fail_memmap_file:
	debugfs_remove(zcore_memmap_file);
fail_file:
	debugfs_remove(zcore_file);
fail_dir:
	debugfs_remove(zcore_dir);
fail:
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}
static void __exit zcore_exit(void)
{
	debug_unregister(zcore_dbf);
	sclp_sdias_exit();
	free_page((unsigned long) ipl_block);
	debugfs_remove(zcore_reipl_file);
	debugfs_remove(zcore_memmap_file);
	debugfs_remove(zcore_file);
	debugfs_remove(zcore_dir);
	diag308(DIAG308_REL_HSA, NULL);
}

MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");

subsys_initcall(zcore_init);
module_exit(zcore_exit);