/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
#define DEVPORT_MINOR	4
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
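/*
 * For example, with 4 KiB pages a caller passing start = 0x1ff0 and
 * size = 64 gets 16 back: only the bytes left in the current page are
 * handled in one pass, and the copy loops below advance start by that
 * amount before clamping again.
 */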
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
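/*
 * With CONFIG_STRICT_DEVMEM the architecture's devmem_is_allowed() decides,
 * page by page, whether /dev/mem may touch a frame: 0 rejects the access,
 * 1 permits it, and 2 (on architectures that use it) permits the access but
 * makes the read path below substitute zeroes and the write path silently
 * skip the store.  Without the option every physical page is accessible.
 */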
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				return -EFAULT;

			remaining = copy_to_user(buf, ptr, sz);

			unxlate_dev_mem_ptr(p, ptr);
		}

		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
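/*
 * From userspace this is ordinary file I/O on /dev/mem; a minimal sketch
 * (error handling omitted, the physical address is only an example):
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char buf[16];
 *	pread(fd, buf, sizeof(buf), 0xf0000);
 *
 * The pread() offset is the physical address; whether the read succeeds
 * depends on valid_phys_addr_range(), CONFIG_STRICT_DEVMEM and the
 * CAP_SYS_RAWIO check performed at open() time.
 */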
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about, or through
	 * a file pointer that was marked O_DSYNC, will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
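/*
 * phys_mem_access_prot() is what keeps mmap() of MMIO through /dev/mem
 * coherent: anything outside RAM, or any mapping requested through a file
 * descriptor opened with O_DSYNC/O_SYNC, gets pgprot_noncached() so loads
 * and stores really reach the bus instead of the CPU cache.
 */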
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
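/*
 * The typical consumer is a userspace driver mapping an MMIO window; a
 * minimal sketch, assuming a hypothetical 4 KiB register block at physical
 * address 0xfed00000 and no error handling:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile uint32_t *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				       MAP_SHARED, fd, 0xfed00000);
 *	uint32_t id = regs[0];
 *
 * The mmap() offset is the page-aligned physical address, and opening with
 * O_SYNC (which implies O_DSYNC) is what steers phys_mem_access_prot()
 * above towards an uncached mapping on most architectures.
 */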
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf))
				return -ENXIO;

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}
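/*
 * Two regions are handled above: addresses below high_memory are copied
 * straight out of the kernel's linear mapping, while vmalloc and module
 * addresses are bounced through a temporary page via vread(), which
 * zero-fills any holes instead of faulting on unmapped guard pages.
 */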
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
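/*
 * /dev/port exposes the 64 KiB legacy I/O-port space one byte at a time:
 * the file offset is the port number, reads issue inb() and writes issue
 * outb().  Bytes already sent to a port cannot be taken back, which is why
 * a fault in the middle of a write reports the partial count (tmp > buf)
 * rather than -EFAULT.
 */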
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}
static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	iov_iter_advance(from, count);
	return count;
}
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}
static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}
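/*
 * In practice lseek(fd, 0x100000, SEEK_SET) on /dev/mem points the file at
 * physical address 1 MiB.  Offsets in the top -MAX_ERRNO range are refused
 * with -EOVERFLOW so that a huge "address" can never be mistaken for a
 * negative errno value when the syscall returns.
 */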
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem
static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};
static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};
static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};
static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};
static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, 0 },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, 0 },
	 [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
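/*
 * The array index is the minor number under MEM_MAJOR (1), so this table
 * is exactly the classic set of nodes: /dev/mem is 1:1, /dev/null 1:3,
 * /dev/zero 1:5, /dev/urandom 1:9, and so on.  Unpopulated slots such as
 * [0], [6] and [10] never get a device node.  A node can be recreated by
 * hand with, for example, "mknod /dev/zero c 1 5" followed by
 * "chmod 666 /dev/zero".
 */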
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}
static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};
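/*
 * memory_fops is only a dispatcher: every open() on major 1 lands in
 * memory_open(), which looks the minor up in devlist[], swaps in that
 * device's real file_operations and then chains to its open hook, so the
 * rest of the file's lifetime never goes through this table again.
 */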
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}
static struct class *mem_class;
static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}
fs_initcall(chr_dev_init);