/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

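/*
 * Return the number of bytes from 'start' to the end of its page, capped at
 * 'size', i.e. how much of a request can be served without crossing a page
 * boundary.
 */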
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through
	 * a file pointer that was marked O_DSYNC will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

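/*
 * With CONFIG_STRICT_DEVMEM, every page of a requested /dev/mem range is
 * checked against devmem_is_allowed(); without it, any range is permitted.
 */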
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

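/*
 * Default no-op: architectures that set up temporary mappings in
 * xlate_dev_mem_ptr() override this weak symbol to tear them down again.
 */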
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

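/*
 * Weak default that permits any protection; architectures may override it to
 * veto or adjust the page protection used for a physical mapping.
 */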
int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

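/*
 * mmap() of /dev/mem: validate the physical range and permissions, pick an
 * appropriate page protection, then map the pages with remap_pfn_range().
 */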
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}

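/*
 * Helper for write_kmem(): copies user data into the kernel's directly
 * mapped low memory, one page-sized chunk at a time.
 */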
static inline ssize_t
do_write_kmem(unsigned long p, const char __user *buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				if (wrote + virtr)
					break;
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			sz = vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

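/*
 * /dev/port: byte-wide access to the legacy I/O port space (ports 0-65535)
 * using inb()/outb(); the file offset selects the port number.
 */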
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

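/*
 * Reads from /dev/zero fill the user buffer with zeroes via __clear_user(),
 * working in page-sized chunks and checking for pending signals in between.
 */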
static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

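/*
 * Writes to /dev/kmsg are copied into a temporary buffer and injected into
 * the kernel log via printk().
 */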
static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

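/*
 * Table of memory-class character devices, indexed by minor number under
 * MEM_MAJOR. Unused slots have a NULL name and are skipped at init time.
 */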
static const struct memdev {
	const char *name;
	mode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
	[11] = { "kmsg", 0, &kmsg_fops, NULL },
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};

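/*
 * Common open() for the whole major: look up the minor in devlist[], switch
 * the file to that device's file_operations and call its open() if present.
 */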
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;
	int ret = -ENXIO;

	lock_kernel();

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		goto out;

	dev = &devlist[minor];
	if (!dev->fops)
		goto out;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	if (dev->fops->open)
		ret = dev->fops->open(inode, filp);
	else
		ret = 0;
out:
	unlock_kernel();
	return ret;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
};

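/*
 * Report the default device node mode from devlist[] so the nodes under
 * /dev are created with the intended permissions.
 */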
static char *mem_devnode(struct device *dev, mode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

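/*
 * Register MEM_MAJOR, create the "mem" class and a device node for every
 * populated devlist[] entry.
 */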
static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;
		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);