// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

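/*
 * Illustrative example (not from the original source): with PAGE_SIZE == 4096,
 * a transfer starting at offset 0x1f00 has 0x100 (256) bytes left in its page,
 * so size_inside_page(0x1f00, 4096) clips the first chunk to 256 bytes; the
 * read/write loops below use this helper to advance page by page.
 */
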
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = probe_kernel_read(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}

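/*
 * Illustrative example (not part of the driver): a privileged process can read
 * the first page of physical memory through this handler with something like
 *
 *	dd if=/dev/mem bs=4096 count=1 of=page0.bin
 *
 * With CONFIG_STRICT_DEVMEM, restricted ranges read back as zeroes rather
 * than real contents.
 */
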
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about or through a
         * file pointer that was marked O_DSYNC will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                        if (should_stop_iteration()) {
                                count = 0;
                                break;
                        }
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}

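/*
 * The read path above is two-phase: addresses below high_memory are copied
 * straight out of the kernel direct mapping, while vmalloc/module addresses
 * are staged through a bounce page via vread(), since they need not be
 * physically contiguous.
 */
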
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);

        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

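/*
 * Illustrative example (not part of the driver): reading /dev/zero, e.g.
 *
 *	head -c 1048576 /dev/zero > zeroes.bin
 *
 * ends up here; the PAGE_SIZE chunking above keeps each iov_iter_zero() call
 * short so pending signals are noticed between chunks.
 */
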
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        vma_set_anonymous(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                /* fall through */
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

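/*
 * Illustrative example (not part of the driver): positioning /dev/mem at a
 * physical address before reading goes through this lseek handler, e.g.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	lseek(fd, phys_addr, SEEK_SET);
 *	read(fd, buf, len);
 *
 * SEEK_END is rejected with -EINVAL because "end of file" has no meaning for
 * these devices.
 */
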
static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek		= memory_lseek,
        .read		= read_mem,
        .write		= write_mem,
        .mmap		= mmap_mem,
        .open		= open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek		= memory_lseek,
        .read		= read_kmem,
        .write		= write_kmem,
        .mmap		= mmap_kmem,
        .open		= open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek		= null_lseek,
        .read		= read_null,
        .write		= write_null,
        .read_iter	= read_iter_null,
        .write_iter	= write_iter_null,
        .splice_write	= splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek		= memory_lseek,
        .read		= read_port,
        .write		= write_port,
        .open		= open_port,
};

static const struct file_operations zero_fops = {
        .llseek		= zero_lseek,
        .write		= write_zero,
        .read_iter	= read_iter_zero,
        .write_iter	= write_iter_zero,
        .mmap		= mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek		= full_lseek,
        .read_iter	= read_iter_zero,
        .write		= write_full,
};

static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

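/*
 * The array index doubles as the minor number under MEM_MAJOR (1), so e.g.
 * /dev/null is character device 1:3 and /dev/urandom is 1:9; a node can be
 * recreated by hand with (illustrative)
 *
 *	mknod /dev/null c 1 3
 */
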
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);