Use dentry_path() to create full path to inode object
[pohmelfs.git] / drivers / s390 / char / zcore.c
blob1b6d9247fdc78a4237d5e7048347a0a66875d140
1 /*
2 * zcore module to export memory content and register sets for creating system
3 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
4 * dump format as s390 standalone dumps.
6 * For more information please refer to Documentation/s390/zfcpdump.txt
8 * Copyright IBM Corp. 2003,2008
9 * Author(s): Michael Holzheu
12 #define KMSG_COMPONENT "zdump"
13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/miscdevice.h>
18 #include <linux/debugfs.h>
19 #include <linux/module.h>
20 #include <asm/asm-offsets.h>
21 #include <asm/ipl.h>
22 #include <asm/sclp.h>
23 #include <asm/setup.h>
24 #include <asm/sigp.h>
25 #include <asm/uaccess.h>
26 #include <asm/debug.h>
27 #include <asm/processor.h>
28 #include <asm/irqflags.h>
29 #include <asm/checksum.h>
30 #include "sclp.h"
/* Write a trace entry into the zcore debug feature (level 1). */
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)

/* Copy destination selector for memcpy_hsa() */
#define TO_USER		0
#define TO_KERNEL	1

#define CHUNK_INFO_SIZE	34 /* 2 16-byte char, each followed by blank */

/* Architecture id byte as stored at __LC_AR_MODE_ID in the HSA */
enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};
43 /* dump system info */
45 struct sys_info {
46 enum arch_id arch;
47 unsigned long sa_base;
48 u32 sa_size;
49 int cpu_map[NR_CPUS];
50 unsigned long mem_size;
51 struct save_area lc_mask;
54 struct ipib_info {
55 unsigned long ipib;
56 u32 checksum;
57 } __attribute__((packed));
59 static struct sys_info sys_info;
60 static struct debug_info *zcore_dbf;
61 static int hsa_available;
62 static struct dentry *zcore_dir;
63 static struct dentry *zcore_file;
64 static struct dentry *zcore_memmap_file;
65 static struct dentry *zcore_reipl_file;
66 static struct ipl_parameter_block *ipl_block;
69 * Copy memory from HSA to kernel or user memory (not reentrant):
71 * @dest: Kernel or user buffer where memory should be copied to
72 * @src: Start address within HSA where data should be copied
73 * @count: Size of buffer, which should be copied
74 * @mode: Either TO_KERNEL or TO_USER
76 static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
78 int offs, blk_num;
79 static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
81 if (count == 0)
82 return 0;
84 /* copy first block */
85 offs = 0;
86 if ((src % PAGE_SIZE) != 0) {
87 blk_num = src / PAGE_SIZE + 2;
88 if (sclp_sdias_copy(buf, blk_num, 1)) {
89 TRACE("sclp_sdias_copy() failed\n");
90 return -EIO;
92 offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
93 if (mode == TO_USER) {
94 if (copy_to_user((__force __user void*) dest,
95 buf + (src % PAGE_SIZE), offs))
96 return -EFAULT;
97 } else
98 memcpy(dest, buf + (src % PAGE_SIZE), offs);
100 if (offs == count)
101 goto out;
103 /* copy middle */
104 for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
105 blk_num = (src + offs) / PAGE_SIZE + 2;
106 if (sclp_sdias_copy(buf, blk_num, 1)) {
107 TRACE("sclp_sdias_copy() failed\n");
108 return -EIO;
110 if (mode == TO_USER) {
111 if (copy_to_user((__force __user void*) dest + offs,
112 buf, PAGE_SIZE))
113 return -EFAULT;
114 } else
115 memcpy(dest + offs, buf, PAGE_SIZE);
117 if (offs == count)
118 goto out;
120 /* copy last block */
121 blk_num = (src + offs) / PAGE_SIZE + 2;
122 if (sclp_sdias_copy(buf, blk_num, 1)) {
123 TRACE("sclp_sdias_copy() failed\n");
124 return -EIO;
126 if (mode == TO_USER) {
127 if (copy_to_user((__force __user void*) dest + offs, buf,
128 PAGE_SIZE))
129 return -EFAULT;
130 } else
131 memcpy(dest + offs, buf, count - offs);
132 out:
133 return 0;
136 static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
138 return memcpy_hsa((void __force *) dest, src, count, TO_USER);
141 static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
143 return memcpy_hsa(dest, src, count, TO_KERNEL);
146 static int __init init_cpu_info(enum arch_id arch)
148 struct save_area *sa;
150 /* get info for boot cpu from lowcore, stored in the HSA */
152 sa = kmalloc(sizeof(*sa), GFP_KERNEL);
153 if (!sa)
154 return -ENOMEM;
155 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
156 TRACE("could not copy from HSA\n");
157 kfree(sa);
158 return -EIO;
160 zfcpdump_save_areas[0] = sa;
161 return 0;
/* Serializes reads/seeks on zcore/mem and use of the static HSA buffer */
static DEFINE_MUTEX(zcore_mutex);

/* s390 standalone dump format constants */
#define DUMP_VERSION	0x5
#define DUMP_MAGIC	0xa8190173618f23fdULL
#define DUMP_ARCH_S390X	2
#define DUMP_ARCH_S390	1
#define HEADER_SIZE	4096
172 /* dump header dumped according to s390 crash dump format */
174 struct zcore_header {
175 u64 magic;
176 u32 version;
177 u32 header_size;
178 u32 dump_level;
179 u32 page_size;
180 u64 mem_size;
181 u64 mem_start;
182 u64 mem_end;
183 u32 num_pages;
184 u32 pad1;
185 u64 tod;
186 struct cpuid cpu_id;
187 u32 arch_id;
188 u32 volnr;
189 u32 build_arch;
190 u64 rmem_size;
191 u8 mvdump;
192 u16 cpu_cnt;
193 u16 real_cpu_cnt;
194 u8 end_pad1[0x200-0x061];
195 u64 mvdump_sign;
196 u64 mvdump_zipl_time;
197 u8 end_pad2[0x800-0x210];
198 u32 lc_vec[512];
199 } __attribute__((packed,__aligned__(16)));
201 static struct zcore_header zcore_header = {
202 .magic = DUMP_MAGIC,
203 .version = DUMP_VERSION,
204 .header_size = 4096,
205 .dump_level = 0,
206 .page_size = PAGE_SIZE,
207 .mem_start = 0,
208 #ifdef CONFIG_64BIT
209 .build_arch = DUMP_ARCH_S390X,
210 #else
211 .build_arch = DUMP_ARCH_S390,
212 #endif
216 * Copy lowcore info to buffer. Use map in order to copy only register parts.
218 * @buf: User buffer
219 * @sa: Pointer to save area
220 * @sa_off: Offset in save area to copy
221 * @len: Number of bytes to copy
223 static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
225 int i;
226 char *lc_mask = (char*)&sys_info.lc_mask;
228 for (i = 0; i < len; i++) {
229 if (!lc_mask[i + sa_off])
230 continue;
231 if (copy_to_user(buf + i, sa + sa_off + i, 1))
232 return -EFAULT;
234 return 0;
238 * Copy lowcores info to memory, if necessary
240 * @buf: User buffer
241 * @addr: Start address of buffer in dump memory
242 * @count: Size of buffer
244 static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
246 unsigned long end;
247 int i = 0;
249 if (count == 0)
250 return 0;
252 end = start + count;
253 while (zfcpdump_save_areas[i]) {
254 unsigned long cp_start, cp_end; /* copy range */
255 unsigned long sa_start, sa_end; /* save area range */
256 unsigned long prefix;
257 unsigned long sa_off, len, buf_off;
259 prefix = zfcpdump_save_areas[i]->pref_reg;
260 sa_start = prefix + sys_info.sa_base;
261 sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
263 if ((end < sa_start) || (start > sa_end))
264 goto next;
265 cp_start = max(start, sa_start);
266 cp_end = min(end, sa_end);
268 buf_off = cp_start - start;
269 sa_off = cp_start - sa_start;
270 len = cp_end - cp_start;
272 TRACE("copy_lc for: %lx\n", start);
273 if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
274 return -EFAULT;
275 next:
276 i++;
278 return 0;
282 * Read routine for zcore character device
283 * First 4K are dump header
284 * Next 32MB are HSA Memory
285 * Rest is read from absolute Memory
287 static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
288 loff_t *ppos)
290 unsigned long mem_start; /* Start address in memory */
291 size_t mem_offs; /* Offset in dump memory */
292 size_t hdr_count; /* Size of header part of output buffer */
293 size_t size;
294 int rc;
296 mutex_lock(&zcore_mutex);
298 if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
299 rc = -EINVAL;
300 goto fail;
303 count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
305 /* Copy dump header */
306 if (*ppos < HEADER_SIZE) {
307 size = min(count, (size_t) (HEADER_SIZE - *ppos));
308 if (copy_to_user(buf, &zcore_header + *ppos, size)) {
309 rc = -EFAULT;
310 goto fail;
312 hdr_count = size;
313 mem_start = 0;
314 } else {
315 hdr_count = 0;
316 mem_start = *ppos - HEADER_SIZE;
319 mem_offs = 0;
321 /* Copy from HSA data */
322 if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
323 size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
324 - mem_start));
325 rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
326 if (rc)
327 goto fail;
329 mem_offs += size;
332 /* Copy from real mem */
333 size = count - mem_offs - hdr_count;
334 rc = copy_to_user_real(buf + hdr_count + mem_offs,
335 (void *) mem_start + mem_offs, size);
336 if (rc)
337 goto fail;
340 * Since s390 dump analysis tools like lcrash or crash
341 * expect register sets in the prefix pages of the cpus,
342 * we copy them into the read buffer, if necessary.
343 * buf + hdr_count: Start of memory part of output buffer
344 * mem_start: Start memory address to copy from
345 * count - hdr_count: Size of memory area to copy
347 if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
348 rc = -EFAULT;
349 goto fail;
351 *ppos += count;
352 fail:
353 mutex_unlock(&zcore_mutex);
354 return (rc < 0) ? rc : count;
357 static int zcore_open(struct inode *inode, struct file *filp)
359 if (!hsa_available)
360 return -ENODATA;
361 else
362 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
365 static int zcore_release(struct inode *inode, struct file *filep)
367 diag308(DIAG308_REL_HSA, NULL);
368 hsa_available = 0;
369 return 0;
372 static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
374 loff_t rc;
376 mutex_lock(&zcore_mutex);
377 switch (orig) {
378 case 0:
379 file->f_pos = offset;
380 rc = file->f_pos;
381 break;
382 case 1:
383 file->f_pos += offset;
384 rc = file->f_pos;
385 break;
386 default:
387 rc = -EINVAL;
389 mutex_unlock(&zcore_mutex);
390 return rc;
393 static const struct file_operations zcore_fops = {
394 .owner = THIS_MODULE,
395 .llseek = zcore_lseek,
396 .read = zcore_read,
397 .open = zcore_open,
398 .release = zcore_release,
401 static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
402 size_t count, loff_t *ppos)
404 return simple_read_from_buffer(buf, count, ppos, filp->private_data,
405 MEMORY_CHUNKS * CHUNK_INFO_SIZE);
408 static int zcore_memmap_open(struct inode *inode, struct file *filp)
410 int i;
411 char *buf;
412 struct mem_chunk *chunk_array;
414 chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
415 GFP_KERNEL);
416 if (!chunk_array)
417 return -ENOMEM;
418 detect_memory_layout(chunk_array);
419 buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
420 if (!buf) {
421 kfree(chunk_array);
422 return -ENOMEM;
424 for (i = 0; i < MEMORY_CHUNKS; i++) {
425 sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
426 (unsigned long long) chunk_array[i].addr,
427 (unsigned long long) chunk_array[i].size);
428 if (chunk_array[i].size == 0)
429 break;
431 kfree(chunk_array);
432 filp->private_data = buf;
433 return nonseekable_open(inode, filp);
436 static int zcore_memmap_release(struct inode *inode, struct file *filp)
438 kfree(filp->private_data);
439 return 0;
442 static const struct file_operations zcore_memmap_fops = {
443 .owner = THIS_MODULE,
444 .read = zcore_memmap_read,
445 .open = zcore_memmap_open,
446 .release = zcore_memmap_release,
447 .llseek = no_llseek,
450 static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
451 size_t count, loff_t *ppos)
453 if (ipl_block) {
454 diag308(DIAG308_SET, ipl_block);
455 diag308(DIAG308_IPL, NULL);
457 return count;
/* Open zcore/reipl; the file is not seekable. */
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
	return nonseekable_open(inode, filp);
}
/* Nothing to clean up on close. */
static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
	return 0;
}
470 static const struct file_operations zcore_reipl_fops = {
471 .owner = THIS_MODULE,
472 .write = zcore_reipl_write,
473 .open = zcore_reipl_open,
474 .release = zcore_reipl_release,
475 .llseek = no_llseek,
478 #ifdef CONFIG_32BIT
480 static void __init set_lc_mask(struct save_area *map)
482 memset(&map->ext_save, 0xff, sizeof(map->ext_save));
483 memset(&map->timer, 0xff, sizeof(map->timer));
484 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
485 memset(&map->psw, 0xff, sizeof(map->psw));
486 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
487 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
488 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
489 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
490 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
493 #else /* CONFIG_32BIT */
495 static void __init set_lc_mask(struct save_area *map)
497 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
498 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
499 memset(&map->psw, 0xff, sizeof(map->psw));
500 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
501 memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
502 memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
503 memset(&map->timer, 0xff, sizeof(map->timer));
504 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
505 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
506 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
509 #endif /* CONFIG_32BIT */
512 * Initialize dump globals for a given architecture
514 static int __init sys_info_init(enum arch_id arch)
516 int rc;
518 switch (arch) {
519 case ARCH_S390X:
520 pr_alert("DETECTED 'S390X (64 bit) OS'\n");
521 break;
522 case ARCH_S390:
523 pr_alert("DETECTED 'S390 (32 bit) OS'\n");
524 break;
525 default:
526 pr_alert("0x%x is an unknown architecture.\n",arch);
527 return -EINVAL;
529 sys_info.sa_base = SAVE_AREA_BASE;
530 sys_info.sa_size = sizeof(struct save_area);
531 sys_info.arch = arch;
532 set_lc_mask(&sys_info.lc_mask);
533 rc = init_cpu_info(arch);
534 if (rc)
535 return rc;
536 sys_info.mem_size = real_memory_size;
538 return 0;
541 static int __init check_sdias(void)
543 int rc, act_hsa_size;
545 rc = sclp_sdias_blk_count();
546 if (rc < 0) {
547 TRACE("Could not determine HSA size\n");
548 return rc;
550 act_hsa_size = (rc - 1) * PAGE_SIZE;
551 if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
552 TRACE("HSA size too small: %i\n", act_hsa_size);
553 return -EINVAL;
555 return 0;
558 static int __init get_mem_size(unsigned long *mem)
560 int i;
561 struct mem_chunk *chunk_array;
563 chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
564 GFP_KERNEL);
565 if (!chunk_array)
566 return -ENOMEM;
567 detect_memory_layout(chunk_array);
568 for (i = 0; i < MEMORY_CHUNKS; i++) {
569 if (chunk_array[i].size == 0)
570 break;
571 *mem += chunk_array[i].size;
573 kfree(chunk_array);
574 return 0;
577 static int __init zcore_header_init(int arch, struct zcore_header *hdr)
579 int rc, i;
580 unsigned long memory = 0;
581 u32 prefix;
583 if (arch == ARCH_S390X)
584 hdr->arch_id = DUMP_ARCH_S390X;
585 else
586 hdr->arch_id = DUMP_ARCH_S390;
587 rc = get_mem_size(&memory);
588 if (rc)
589 return rc;
590 hdr->mem_size = memory;
591 hdr->rmem_size = memory;
592 hdr->mem_end = sys_info.mem_size;
593 hdr->num_pages = memory / PAGE_SIZE;
594 hdr->tod = get_clock();
595 get_cpu_id(&hdr->cpu_id);
596 for (i = 0; zfcpdump_save_areas[i]; i++) {
597 prefix = zfcpdump_save_areas[i]->pref_reg;
598 hdr->real_cpu_cnt++;
599 if (!prefix)
600 continue;
601 hdr->lc_vec[hdr->cpu_cnt] = prefix;
602 hdr->cpu_cnt++;
604 return 0;
608 * Provide IPL parameter information block from either HSA or memory
609 * for future reipl
611 static int __init zcore_reipl_init(void)
613 struct ipib_info ipib_info;
614 int rc;
616 rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
617 if (rc)
618 return rc;
619 if (ipib_info.ipib == 0)
620 return 0;
621 ipl_block = (void *) __get_free_page(GFP_KERNEL);
622 if (!ipl_block)
623 return -ENOMEM;
624 if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
625 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
626 else
627 rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
628 if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
629 ipib_info.checksum) {
630 TRACE("Checksum does not match\n");
631 free_page((unsigned long) ipl_block);
632 ipl_block = NULL;
634 return 0;
637 static int __init zcore_init(void)
639 unsigned char arch;
640 int rc;
642 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
643 return -ENODATA;
644 if (OLDMEM_BASE)
645 return -ENODATA;
647 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
648 debug_register_view(zcore_dbf, &debug_sprintf_view);
649 debug_set_level(zcore_dbf, 6);
651 TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
652 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
653 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
655 rc = sclp_sdias_init();
656 if (rc)
657 goto fail;
659 rc = check_sdias();
660 if (rc)
661 goto fail;
663 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
664 if (rc)
665 goto fail;
667 #ifdef CONFIG_64BIT
668 if (arch == ARCH_S390) {
669 pr_alert("The 64-bit dump tool cannot be used for a "
670 "32-bit system\n");
671 rc = -EINVAL;
672 goto fail;
674 #else /* CONFIG_64BIT */
675 if (arch == ARCH_S390X) {
676 pr_alert("The 32-bit dump tool cannot be used for a "
677 "64-bit system\n");
678 rc = -EINVAL;
679 goto fail;
681 #endif /* CONFIG_64BIT */
683 rc = sys_info_init(arch);
684 if (rc)
685 goto fail;
687 rc = zcore_header_init(arch, &zcore_header);
688 if (rc)
689 goto fail;
691 rc = zcore_reipl_init();
692 if (rc)
693 goto fail;
695 zcore_dir = debugfs_create_dir("zcore" , NULL);
696 if (!zcore_dir) {
697 rc = -ENOMEM;
698 goto fail;
700 zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
701 &zcore_fops);
702 if (!zcore_file) {
703 rc = -ENOMEM;
704 goto fail_dir;
706 zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
707 NULL, &zcore_memmap_fops);
708 if (!zcore_memmap_file) {
709 rc = -ENOMEM;
710 goto fail_file;
712 zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
713 NULL, &zcore_reipl_fops);
714 if (!zcore_reipl_file) {
715 rc = -ENOMEM;
716 goto fail_memmap_file;
718 hsa_available = 1;
719 return 0;
721 fail_memmap_file:
722 debugfs_remove(zcore_memmap_file);
723 fail_file:
724 debugfs_remove(zcore_file);
725 fail_dir:
726 debugfs_remove(zcore_dir);
727 fail:
728 diag308(DIAG308_REL_HSA, NULL);
729 return rc;
732 static void __exit zcore_exit(void)
734 debug_unregister(zcore_dbf);
735 sclp_sdias_exit();
736 free_page((unsigned long) ipl_block);
737 debugfs_remove(zcore_reipl_file);
738 debugfs_remove(zcore_memmap_file);
739 debugfs_remove(zcore_file);
740 debugfs_remove(zcore_dir);
741 diag308(DIAG308_REL_HSA, NULL);
744 MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
745 MODULE_DESCRIPTION("zcore module for zfcpdump support");
746 MODULE_LICENSE("GPL");
748 subsys_initcall(zcore_init);
749 module_exit(zcore_exit);