/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
	  && addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruptions.
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/* same behaviour as i386. PAT always set to cached and MTRRs control the
	   caching behaviour.
	   Hopefully a full PAT implementation will fix that soon. */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top the kernel knows about or through a file pointer
	 * that was marked O_SYNC will be done non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
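/*
 * Illustrative sketch (not part of the original driver): how a user-space
 * program might rely on the O_SYNC behaviour above to get an uncached
 * mapping of a device region through /dev/mem.  The physical address and
 * mapping size below are made-up placeholder values.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0xfebf0000UL);
 *	if (regs != MAP_FAILED)
 *		((volatile unsigned int *)regs)[0] = 0x1;	// uncached MMIO write
 */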
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
	unsigned long end_mem;

	end_mem = __pa(high_memory);
	if (addr >= end_mem)
		return 0;

	if (*count > end_mem - addr)
		*count = end_mem - addr;

	return 1;
}
#endif
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
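/*
 * Illustrative sketch (not part of the original driver): reading a few bytes
 * of physical memory from user space.  The offset given to pread() is the
 * physical address, as read_mem() above documents; 0x9f000 is just a
 * placeholder value.
 *
 *	unsigned char data[16];
 *	int fd = open("/dev/mem", O_RDONLY);
 *	if (fd >= 0 && pread(fd, data, sizeof(data), 0x9f000) == sizeof(data))
 *		;	// data[] now holds the bytes at physical 0x9f000
 */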
static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	char *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	vma->vm_page_prot = phys_mem_access_prot(file, offset,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
#elif defined(pgprot_noncached)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end-vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long long val;
	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(vma->vm_pgoff))
		return -EIO;
	val = (u64)vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
	return mmap_mem(file, vma);
}
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					ssize_t ret;

					free_page((unsigned long)kbuf);
					ret = wrote + virtr + (len - written);
					return ret ? ret : -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i),tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp))
			return -EFAULT;
		outb(c,i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
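/*
 * Illustrative sketch (not part of the original driver): byte-wise port I/O
 * from user space through /dev/port.  The file offset selects the port
 * number, so seeking to 0x70/0x71 reaches the CMOS index/data ports (a real
 * pair, but chosen here only as an example).  Opening /dev/port requires
 * CAP_SYS_RAWIO; see open_port() below.
 *
 *	unsigned char idx = 0x00, sec;
 *	int fd = open("/dev/port", O_RDWR);
 *	pwrite(fd, &idx, 1, 0x70);	// select CMOS register 0 (seconds)
 *	pread(fd, &sec, 1, 0x71);	// read it back
 */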
static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}
#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr=(unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		zap_page_range(vma, addr, count, NULL);
		zeromap_page_range(vma, addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}
static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
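/*
 * Illustrative sketch (not part of the original driver): mapping /dev/zero
 * from user space is the classic way to get zero-filled memory.  A MAP_SHARED
 * mapping goes through shmem_zero_setup() above, a private one through
 * zeromap_page_range().
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, fd, 0);	// 1 MiB of zero pages
 */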
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	size_t todo = count;

	while (todo) {
		size_t chunk = todo;

		if (chunk > 4096)
			chunk = 4096;	/* Just for latency reasons */
		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		todo -= chunk;
		cond_resched();
	}
	return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */
static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
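/*
 * Illustrative sketch (not part of the original driver): the point of
 * null_lseek() is that append-mode opens, which seek to "end of file",
 * now work on /dev/null and /dev/zero.
 *
 *	FILE *f = fopen("/dev/null", "a");	// succeeds thanks to null_lseek()
 *	if (f)
 *		fprintf(f, "discarded\n");
 */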
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	down(&file->f_dentry->d_inode->i_sem);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	up(&file->f_dentry->d_inode->i_sem);
	return ret;
}
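/*
 * Illustrative sketch (not part of the original driver): seeking /dev/mem to
 * an absolute physical address and reading from there, which exercises the
 * SEEK_SET branch of memory_lseek() above.  0x100000 is a placeholder value.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char byte;
 *	if (lseek(fd, 0x100000, SEEK_SET) != (off_t)-1)
 *		read(fd, &byte, 1);	// first byte above 1 MiB
 */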
static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
static struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
};

static struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
};

static struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};
static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
	}
	kfree(tmp);
	return ret;
}

static struct file_operations kmsg_fops = {
	.write =	kmsg_write,
};
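/*
 * Illustrative sketch (not part of the original driver): injecting a line
 * into the kernel log from user space; kmsg_write() above hands the buffer
 * to printk().
 *
 *	int fd = open("/dev/kmsg", O_WRONLY);
 *	const char msg[] = "hello from userspace\n";
 *	write(fd, msg, sizeof(msg) - 1);	// shows up in dmesg
 */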
static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode,filp);
	return 0;
}

static struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};
static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
};

static struct class_simple *mem_class;
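/*
 * Illustrative sketch (not part of the original driver): creating one of the
 * nodes from the devlist table by hand with mknod(2); udev/devfs normally
 * does this.  MEM_MAJOR is 1, so /dev/null is character device 1:3 with the
 * mode listed above.
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *	mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 */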
static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_simple_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++) {
		class_simple_device_add(mem_class,
					MKDEV(MEM_MAJOR, devlist[i].minor),
					NULL, devlist[i].name);
		devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
				S_IFCHR | devlist[i].mode, devlist[i].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);