// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/pseudo_fs.h>
#include <uapi/linux/magic.h>
#include <linux/mount.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR	1
#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}
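
/*
 * Editor's note (not in the original source): size_inside_page() clamps a
 * transfer to the end of the current page. Worked example, assuming 4 KiB
 * pages: start = 0x1ff0 and size = 0x100 give
 * sz = 0x1000 - (0x1ff0 & 0xfff) = 0x10, so min(sz, size) = 16 bytes are
 * copied before the caller's loop re-evaluates at the page boundary.
 */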

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return fatal_signal_pending(current);
}
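
/*
 * Editor's note (not in the original source): the copy loops below call this
 * helper once per page, so a long /dev/mem or /dev/kmem transfer neither hogs
 * the CPU (cond_resched() when rescheduling is due) nor outlives a SIGKILL
 * (fatal_signal_pending() terminates the loop early).
 */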

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = probe_kernel_read(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about, or through
         * a file pointer that was marked O_DSYNC, will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
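
/*
 * Editor's note (not in the original source): a minimal userspace sketch of
 * the mapping path above; "phys" is a hypothetical page-aligned physical
 * address that the caller is permitted to map:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile uint32_t *reg = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				      MAP_SHARED, fd, phys);
 *
 * The byte offset passed to mmap() arrives here as vma->vm_pgoff
 * (i.e. phys >> PAGE_SHIFT), which is what the checks above validate.
 */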

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                        if (should_stop_iteration()) {
                                count = 0;
                                break;
                        }
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);

        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}
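
/*
 * Editor's note (not in the original source): zero-fill is deliberately done
 * in PAGE_SIZE chunks so that even a large request, e.g.
 * "dd if=/dev/zero of=/dev/null bs=1M", stays interruptible between pages
 * via the signal_pending() check and cond_resched() above.
 */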

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        vma_set_anonymous(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                /* fall through */
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}
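
/*
 * Editor's note (not in the original source): the -MAX_ERRNO clamp exists
 * because an offset in the error-pointer range (e.g. 0xfffffffffffffff7 on
 * 64-bit, which equals -EBADF = -9) would be indistinguishable from an error
 * code when returned to userspace; force_successful_syscall_return()
 * suppresses that aliasing for valid large offsets on architectures that
 * need it.
 */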

static struct inode *devmem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
        struct inode *inode = READ_ONCE(devmem_inode);

        /*
         * Check that the initialization has completed. Losing the race
         * is ok because it means drivers are claiming resources before
         * the fs_initcall level of init and prevent /dev/mem from
         * establishing mappings.
         */
        if (!inode)
                return;

        /*
         * The expectation is that the driver has successfully marked
         * the resource busy by this point, so devmem_is_allowed()
         * should start returning false, however for performance this
         * does not iterate the entire resource range.
         */
        if (devmem_is_allowed(PHYS_PFN(res->start)) &&
            devmem_is_allowed(PHYS_PFN(res->end))) {
                /*
                 * *cringe* iomem=relaxed says "go ahead, what's the
                 * worst that can happen?"
                 */
                return;
        }

        unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#endif

static int open_port(struct inode *inode, struct file *filp)
{
        int rc;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        rc = security_locked_down(LOCKDOWN_DEV_MEM);
        if (rc)
                return rc;

        if (iminor(inode) != DEVMEM_MINOR)
                return 0;

        /*
         * Use a unified address space to have a single point to manage
         * revocations when drivers want to take over a /dev/mem mapped
         * range.
         */
        inode->i_mapping = devmem_inode->i_mapping;
        filp->f_mapping = inode->i_mapping;

        return 0;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
        [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
        [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
        [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
        [4] = { "port", 0, &port_fops, 0 },
#endif
        [5] = { "zero", 0666, &zero_fops, 0 },
        [7] = { "full", 0666, &full_fops, 0 },
        [8] = { "random", 0666, &random_fops, 0 },
        [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
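
/*
 * Editor's note (not in the original source): the array index above is the
 * minor number under char major MEM_MAJOR (1), so the table corresponds to
 * the classic device nodes, e.g.:
 *
 *	mknod /dev/null c 1 3
 *	mknod /dev/zero c 1 5
 *	mknod /dev/urandom c 1 9
 */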

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int devmem_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type devmem_fs_type = {
        .name           = "devmem",
        .owner          = THIS_MODULE,
        .init_fs_context = devmem_fs_init_fs_context,
        .kill_sb        = kill_anon_super,
};

static int devmem_init_inode(void)
{
        static struct vfsmount *devmem_vfs_mount;
        static int devmem_fs_cnt;
        struct inode *inode;
        int rc;

        rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
        if (rc < 0) {
                pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
                return rc;
        }

        inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
        if (IS_ERR(inode)) {
                rc = PTR_ERR(inode);
                pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
                simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
                return rc;
        }

        /* publish /dev/mem initialized */
        WRITE_ONCE(devmem_inode, inode);

        return 0;
}

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;
                if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);