/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
#define DEVPORT_MINOR	4
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
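/*
 * Illustrative worked example, not from the original source: with
 * PAGE_SIZE == 0x1000 and start == 0x1ffc, the page-offset mask leaves
 * 0x1000 - 0xffc == 4 bytes in the current page, so a request for 100
 * bytes is clamped to min(4, 100) == 4.  The read/write loops below
 * iterate on this so each copy stays within a single page.
 */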
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif
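/*
 * A hedged note, not in the original: the generic read/write bound only
 * admits physical addresses below __pa(high_memory), i.e. RAM that the
 * kernel direct-maps; a read() of MMIO space above RAM fails this check
 * with -EFAULT.  The mmap variant accepts any pfn here and relies on
 * range_is_allowed() or an architecture override to reject bad ranges.
 */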
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
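/*
 * Hedged userspace sketch, not part of this file: reading one byte of
 * physical memory through the interface above.  The address is an
 * arbitrary placeholder and must be permitted by CONFIG_STRICT_DEVMEM
 * on hardened kernels.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char b;
 *	lseek(fd, 0x90000, SEEK_SET);	// file offset == physical address
 *	read(fd, &b, 1);
 */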
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
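/*
 * A hedged note, not in the original: on a short copy_from_user() the
 * loop above credits the bytes that did land (sz - copied) and stops, so
 * userspace observes a partial write rather than a hard failure, which
 * matches ordinary write(2) semantics.
 */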
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of what the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
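/*
 * Hedged userspace sketch, not in the original: on architectures that
 * take the generic path above, opening with O_DSYNC requests uncached
 * access to the mapped range:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_DSYNC);
 */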
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}
/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}
/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
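/*
 * Hedged userspace sketch, not part of this file: mapping one page of
 * physical memory.  mmap's file offset is the physical address; the
 * kernel stores it as vm_pgoff (offset >> PAGE_SHIFT) before the checks
 * above run.  The address below is an arbitrary placeholder.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile uint32_t *reg = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				      MAP_SHARED, fd, 0xfe000000);
 */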
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf))
				return -ENXIO;

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}
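/*
 * A hedged note, not in the original: offsets into /dev/kmem are kernel
 * *virtual* addresses, so lowmem is copied out directly while vmalloc
 * and module space is bounced through a scratch page via vread().  The
 * interface is gated behind CONFIG_DEVKMEM and was eventually removed
 * from mainline kernels entirely.
 */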
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}
static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}
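/*
 * Hedged userspace sketch, not part of this file: an ordinary read() of
 * /dev/zero exercises the iterator above; internally it zeroes at most
 * PAGE_SIZE per iteration so large reads stay responsive to signals.
 *
 *	char buf[65536];
 *	int fd = open("/dev/zero", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));	// buf is now all zeroes
 */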
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}
static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}
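/*
 * A hedged note, not in the original: because any offset at or above
 * -MAX_ERRNO (as an unsigned value) is rejected with -EOVERFLOW, a
 * successful return can never collide with the errno range, e.g.
 *
 *	off_t pos = lseek(fd, 0x100000, SEEK_SET);	// pos == 0x100000
 *	lseek(fd, 0, SEEK_END);				// fails with EINVAL
 */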
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem
static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};
static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};
static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};
static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, 0 },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, 0 },
	 [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}
static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}
static struct class *mem_class;
static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}
);