/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR   4
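
/*
 * Return how many bytes can be accessed from @start without crossing a page
 * boundary, capped at @size.  The copy loops below use this so that each
 * iteration stays within a single page.
 */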
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif
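
/*
 * With CONFIG_STRICT_DEVMEM the per-page /dev/mem policy is delegated to the
 * architecture's devmem_is_allowed(); without it, every page is allowed.  In
 * read_mem()/write_mem() below, a page_is_allowed() value of 2 means
 * "restricted": reads are shown as zeroes and writes are silently dropped.
 */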
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                return -EFAULT;

                        remaining = copy_to_user(buf, ptr, sz);

                        unxlate_dev_mem_ptr(p, ptr);
                }

                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about or through
         * a file pointer that was marked O_DSYNC will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};
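
/*
 * mmap() of /dev/mem: validate the physical range and the access policy,
 * pick a cached or uncached protection, then map it with remap_pfn_range().
 */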
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
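
/*
 * mmap() of /dev/kmem: the offset is a kernel-virtual address; convert it to
 * a page frame number and reuse mmap_mem().
 */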
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}
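
/*
 * /dev/port: byte-at-a-time access to I/O ports 0-65535 via inb()/outb(),
 * with the file offset used as the port number.
 */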
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
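
/* /dev/null: reads return EOF, writes (and splices) swallow the data. */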
static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
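
/*
 * Read path shared by /dev/zero and /dev/full: hand out zeroes in page-sized
 * chunks, checking for pending signals between chunks.
 */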
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}
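
/*
 * mmap() of /dev/zero: shared mappings are backed by a shmem object, private
 * mappings just get ordinary anonymous zero pages.
 */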
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file_inode(file)->i_mutex);
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                /* fall through */
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if (IS_ERR_VALUE((unsigned long long)offset)) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file_inode(file)->i_mutex);
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
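
/*
 * /dev/zero and /dev/full reuse /dev/null's lseek, /dev/zero's writes are
 * handled like /dev/null's, and opening /dev/mem or /dev/kmem goes through
 * open_port() so that CAP_SYS_RAWIO is required.
 */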
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek = memory_lseek,
        .read = read_mem,
        .write = write_mem,
        .mmap = mmap_mem,
        .open = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek = memory_lseek,
        .read = read_kmem,
        .write = write_kmem,
        .mmap = mmap_kmem,
        .open = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek = null_lseek,
        .read = read_null,
        .write = write_null,
        .read_iter = read_iter_null,
        .write_iter = write_iter_null,
        .splice_write = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek = memory_lseek,
        .read = read_port,
        .write = write_port,
        .open = open_port,
};

static const struct file_operations zero_fops = {
        .llseek = zero_lseek,
        .write = write_zero,
        .read_iter = read_iter_zero,
        .write_iter = write_iter_zero,
        .mmap = mmap_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek = full_lseek,
        .read_iter = read_iter_zero,
        .write = write_full,
};
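
/*
 * Table of minor numbers under MEM_MAJOR.  Unpopulated slots have no fops
 * and are rejected with -ENXIO by memory_open() below.
 */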
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
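
/*
 * Common open() for the whole major: look up the minor in devlist, install
 * the per-device file_operations and fmode bits, then forward the open.
 */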
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;
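
/*
 * Register MEM_MAJOR, create the "mem" class and a device node for each
 * populated devlist entry (skipping /dev/port where the architecture has no
 * port I/O), then hand off to tty_init().
 */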
static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);