/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <asm/uaccess.h>

#define MTD_INODE_FS_MAGIC 0x11307854
static DEFINE_MUTEX(mtd_mutex);
static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
 * Data structure to hold the pointer to the mtd device as well
 * as the mode information for the various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	struct inode *ino;
	enum mtd_file_modes mode;
};
static loff_t mtd_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;
	struct inode *mtd_ino;

	pr_debug("MTD_open\n");
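	/*
	 * Each MTD device owns a pair of character-device minors: minor
	 * 2*N is the read-write /dev/mtdN node and minor 2*N+1 the
	 * read-only /dev/mtdNro node, hence devnum = minor >> 1 above
	 * and the (minor & 1) test below.
	 */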
	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;
	mutex_lock(&mtd_mutex);
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}
	mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
	if (mtd_ino->i_state & I_NEW) {
		mtd_ino->i_private = mtd;
		mtd_ino->i_mode = S_IFCHR;
		mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
		unlock_new_inode(mtd_ino);
	}
	file->f_mapping = mtd_ino->i_mapping;
	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->ino = mtd_ino;
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	mutex_unlock(&mtd_mutex);
	return ret;
} /* mtd_open */
/*====================================================================*/
static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
		mtd->sync(mtd);

	iput(mfi->ino);

	file->private_data = NULL;
	kfree(mfi);
	put_mtd_device(mtd);

	return 0;
} /* mtd_close */
/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory conditions
 * or when memory is highly fragmented, at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */
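/*
 * Concretely (a sketch of the fallback behaviour): mtd_kmalloc_up_to()
 * starts from the full requested transfer size and retries with smaller
 * allocations when a large contiguous buffer is unavailable, so one big
 * read() or write() simply becomes several mtd->read()/mtd->write()
 * calls over the same, smaller bounce buffer.
 */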
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				break;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
static ssize_t mtd_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	pr_debug("MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);
		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len,
						       &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooblen = 0;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */
/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

static void mtdchar_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}
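/*
 * The MEMERASE handling below relies on mtd->erase() being asynchronous:
 * it queues the request, sleeps on a wait queue stored in erase->priv,
 * and mtdchar_erase_callback() wakes it once the driver marks the erase
 * MTD_ERASE_DONE or MTD_ERASE_FAILED.
 */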
#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_user_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif
static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
			uint64_t start, uint32_t length, void __user *ptr,
			uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	uint32_t retlen;
	int ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->write_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;

	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd->write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(retlen)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
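/*
 * Userspace sketch (illustrative only, not part of this file): raw OOB
 * bytes for the page containing a given offset can be written through
 * the MEMWRITEOOB ioctl.
 *
 *	unsigned char oobdata[16];
 *	struct mtd_oob_buf oob = {
 *		.start  = 0,
 *		.length = sizeof(oobdata),
 *		.ptr    = oobdata,
 *	};
 *	ioctl(fd, MEMWRITEOOB, &oob);
 */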
static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
			uint32_t length, void __user *ptr,
			uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->read_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_WRITE, ptr,
				length) ? 0 : -EFAULT;
	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd->read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (ret == -EUCLEAN || ret == -EBADMSG)
		ret = 0;

	return ret;
}
/*
 * Copies (and truncates, if necessary) data from the larger struct,
 * nand_ecclayout, to the smaller, deprecated layout struct,
 * nand_ecclayout_user. This is necessary only to support the deprecated
 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
 * nand_ecclayout flexibly (i.e. the struct may change size in new
 * releases without requiring major rewrites).
 */
static int shrink_ecclayout(const struct nand_ecclayout *from,
		struct nand_ecclayout_user *to)
{
	int i;

	if (!from || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
	for (i = 0; i < to->eccbytes; i++)
		to->eccpos[i] = from->eccpos[i];

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		if (from->oobfree[i].length == 0 &&
				from->oobfree[i].offset == 0)
			break;
		to->oobavail += from->oobfree[i].length;
		to->oobfree[i] = from->oobfree[i];
	}

	return 0;
}
static int mtd_blkpg_ioctl(struct mtd_info *mtd,
			   struct blkpg_ioctl_arg __user *arg)
{
	struct blkpg_ioctl_arg a;
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
		return -EFAULT;

	if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
		return -EFAULT;

	switch (a.op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master mtd device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
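/*
 * Userspace sketch (illustrative only): carving a partition out of the
 * master device with the generic BLKPG ioctl.
 *
 *	struct blkpg_partition part = {
 *		.start   = 0,
 *		.length  = 0x100000,
 *		.devname = "example",
 *	};
 *	struct blkpg_ioctl_arg arg = {
 *		.op   = BLKPG_ADD_PARTITION,
 *		.data = &part,
 *	};
 *	ioctl(fd, BLKPG, &arg);
 */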
static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");
	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;
	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}
	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below fields are obsolete */
		info.padding	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;
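	/*
	 * Userspace sketch (illustrative only): MEMGETINFO is how tools
	 * such as mtd-utils discover the device geometry.
	 *
	 *	struct mtd_info_user info;
	 *	int fd = open("/dev/mtd0", O_RDONLY);
	 *	if (fd >= 0 && ioctl(fd, MEMGETINFO, &info) == 0)
	 *		printf("size %u erasesize %u writesize %u\n",
	 *		       info.size, info.erasesize, info.writesize);
	 */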
	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			 * FIXME: Allow INTERRUPTIBLE. Which means
			 * not having the wait_queue head on the stack.
			 *
			 * If the wq_head is on the stack, and we
			 * leave because we got interrupted, then the
			 * wq_head is no longer there when the
			 * callback routine tries to wake us up.
			 */
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}
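	/*
	 * Userspace sketch (illustrative only): erasing the first erase
	 * block, using the geometry obtained via MEMGETINFO above.
	 *
	 *	struct erase_info_user ei = {
	 *		.start  = 0,
	 *		.length = info.erasesize,
	 *	};
	 *	ioctl(fd, MEMERASE, &ei);
	 */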
	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}
	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}
	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}
	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}
	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, einfo.start, einfo.length);
		break;
	}
	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, einfo.start, einfo.length);
		break;
	}
	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->is_locked)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->is_locked(mtd, einfo.start, einfo.length);
		break;
	}
	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}
	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;

		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}
	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;

		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}
#ifdef CONFIG_HAVE_MTD_OTP
	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}
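	/*
	 * After OTPSELECT this file handle addresses the chosen OTP
	 * (one-time programmable) region instead of the normal flash
	 * array: subsequent read()/write() calls are routed through the
	 * *_prot_reg hooks seen in mtd_read() and mtd_write() above.
	 */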
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}
	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
#endif
	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd->ecclayout, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}
	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}
	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}
	case BLKPG:
	{
		ret = mtd_blkpg_ioctl(mtd,
		      (struct blkpg_ioctl_arg __user *)arg);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */
static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtd_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}
#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)
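/*
 * On a 64-bit kernel a 32-bit process passes a struct mtd_oob_buf whose
 * pointer member is only 32 bits wide, so the ioctl numbers differ and
 * the pointer must be widened with compat_ptr() before reusing the
 * native mtd_do_writeoob()/mtd_do_readoob() helpers.
 */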
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	default:
		ret = mtd_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */
/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (an MMU kernel places mappings
 *   itself, and private copy-on-write mappings of device memory can't be
 *   supported)
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		if (addr != 0)
			return (unsigned long) -EINVAL;

		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
#endif
/*
 * set up a mapping for shared memory segments
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;
	unsigned long start;
	unsigned long off;
	u32 len;

	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
		off = vma->vm_pgoff << PAGE_SHIFT;
		start = map->phys;
		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
		start &= PAGE_MASK;
		if ((vma->vm_end - vma->vm_start + off) > len)
			return -EINVAL;

		off += start;
		vma->vm_pgoff = off >> PAGE_SHIFT;
		vma->vm_flags |= VM_IO | VM_RESERVED;

#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;

		return 0;
	}
	return -ENOSYS;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}
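/*
 * With an MMU, only directly-mapped MTD_RAM/MTD_ROM devices can be
 * mmap()ed, by remapping their physical window; on NOMMU kernels any
 * shared mapping is accepted and mtd_get_unmapped_area() above decides
 * where it may land.
 */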
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.unlocked_ioctl	= mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};
static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL,
			    MTD_INODE_FS_MAGIC);
}
static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.mount = mtd_inodefs_mount,
	.kill_sb = kill_anon_super,
};
static void mtdchar_notify_add(struct mtd_info *mtd)
{
}

static void mtdchar_notify_remove(struct mtd_info *mtd)
{
	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);

	if (mtd_ino) {
		/* Destroy the inode if it exists */
		mtd_ino->i_nlink = 0;
		iput(mtd_ino);
	}
}
static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};
static int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				   "mtd", &mtd_fops);
	if (ret < 0) {
		pr_notice("Can't allocate major number %d for "
			  "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
		return ret;
	}

	ret = register_filesystem(&mtd_inodefs_type);
	if (ret) {
		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_chdev;
	}

	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
	if (IS_ERR(mtd_inode_mnt)) {
		ret = PTR_ERR(mtd_inode_mnt);
		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_filesystem;
	}
	register_mtd_user(&mtdchar_notifier);

	return ret;

err_unregister_filesystem:
	unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return ret;
}
static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	kern_unmount(mtd_inode_mnt);
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);