// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4
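/*
 * Return how many of the requested bytes fit inside the page containing
 * @start, so callers can split transfers on page boundaries.
 */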
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif
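/*
 * With CONFIG_STRICT_DEVMEM the architecture decides, page by page, which
 * physical frames may be touched through /dev/mem; without it every page
 * is allowed.
 */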
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}

static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}

static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = probe_kernel_read(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}
int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
        extern int __uncached_access(struct file *file,
                                     unsigned long addr);

        return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif
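/*
 * Pick the page protection for an mmap of device memory: keep the caller's
 * protection, but switch to an uncached mapping when uncached_access() says
 * the range must not be cached.
 */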
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};
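/*
 * mmap() handler for /dev/mem: validate the requested physical range and
 * map it straight into the caller's address space with remap_pfn_range().
 */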
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
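/*
 * mmap() handler for /dev/kmem: convert the kernel-virtual offset into a
 * physical page frame and reuse mmap_mem() for the actual mapping.
 */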
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add
         * a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}
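/*
 * Helper for write_kmem(): copy user data into the kernel's low (directly
 * mapped) memory, page by page.
 */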
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}
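/*
 * /dev/port: byte-wise access to x86-style I/O ports. The file offset is the
 * port number; reads use inb(), writes use outb(), limited to ports < 65536.
 */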
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}
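/* Splicing to /dev/null just consumes the pipe buffers without copying. */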
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
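/*
 * /dev/zero read path: fill the user buffer with zeroes, at most a page at a
 * time so signals and rescheduling are honoured on large reads.
 */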
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}
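/*
 * Mapping /dev/zero: shared mappings are backed by a shmem object, private
 * mappings simply fault in anonymous zero pages.
 */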
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}
static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                /* fall through */
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}
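/* Opening /dev/mem, /dev/kmem or /dev/port requires CAP_SYS_RAWIO. */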
static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem
static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};
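/*
 * Table of the character devices handled here, indexed by minor number under
 * the shared MEM_MAJOR major. Minors that are left out (or compiled out)
 * have no fops and are rejected in memory_open().
 */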
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
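/*
 * Open handler for the whole major: look up the minor number in devlist,
 * install that device's fops on the file and chain to its own open(), if any.
 */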
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}
static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};
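/*
 * devtmpfs callback: report the default mode for each node so that e.g.
 * /dev/null and /dev/zero come up world-accessible.
 */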
static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;
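/*
 * Register the memory character devices at boot and create their device
 * nodes, then hand over to the tty layer's init.
 */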
static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);