/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */
/*
 * This file includes the implementation of UBI character device operations.
 *
 * There are two kinds of character devices in UBI: UBI character devices and
 * UBI volume character devices. UBI character devices allow users to
 * manipulate whole volumes: create, remove, and re-size them. Volume character
 * devices provide volume I/O capabilities.
 *
 * Major and minor numbers are assigned dynamically to both UBI and volume
 * character devices.
 *
 * There is also a third kind of character device: the UBI control character
 * device, which allows UBI devices themselves to be manipulated, i.e. created
 * and deleted. In other words, it is used for attaching and detaching MTD
 * devices.
 */
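/*
 * To make the division of labour above concrete, the sketch below shows how a
 * userspace program might talk to these nodes. It is an illustrative example
 * only, not part of this driver; the /dev paths are the conventional
 * udev-created names and may differ on a particular system.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	// UBI control device: attach/detach MTD devices.
 *	int ctrl = open("/dev/ubi_ctrl", O_RDONLY);
 *	struct ubi_attach_req att = {
 *		.ubi_num = UBI_DEV_NUM_AUTO,	// let UBI pick the device number
 *		.mtd_num = 0,			// attach mtd0
 *	};
 *	ioctl(ctrl, UBI_IOCATT, &att);	// on success, att.ubi_num holds the number
 *
 *	// UBI character device: whole-volume management (create/remove/re-size).
 *	int ubi = open("/dev/ubi0", O_RDONLY);
 *
 *	// UBI volume character device: volume I/O.
 *	int vol = open("/dev/ubi0_0", O_RDWR);
 */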
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"
/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * This function changes UBI volume open mode to "exclusive". Returns previous
 * mode value (positive integer) in case of success and a negative error code
 * in case of failure.
 */
static int get_exclusive(struct ubi_volume_desc *desc)
{
	int users, err;
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;
	ubi_assert(users > 0);
	if (users > 1) {
		ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
		err = -EBUSY;
	} else {
		vol->readers = vol->writers = vol->metaonly = 0;
		vol->exclusive = 1;
		err = desc->mode;
		desc->mode = UBI_EXCLUSIVE;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	return err;
}
/**
 * revoke_exclusive - revoke exclusive mode.
 * @desc: volume descriptor
 * @mode: new mode to switch to
 */
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	ubi_assert(vol->readers == 0 && vol->writers == 0 && vol->metaonly == 0);
	ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
	vol->exclusive = 0;
	if (mode == UBI_READONLY)
		vol->readers = 1;
	else if (mode == UBI_READWRITE)
		vol->writers = 1;
	else if (mode == UBI_METAONLY)
		vol->metaonly = 1;
	else
		vol->exclusive = 1;
	desc->mode = mode;
	spin_unlock(&vol->ubi->volumes_lock);
}
static int vol_cdev_open(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc;
	int vol_id = iminor(inode) - 1, mode, ubi_num;

	ubi_num = ubi_major2num(imajor(inode));
	if (ubi_num < 0)
		return ubi_num;

	if (file->f_mode & FMODE_WRITE)
		mode = UBI_READWRITE;
	else
		mode = UBI_READONLY;

	dbg_gen("open device %d, volume %d, mode %d",
		ubi_num, vol_id, mode);

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	file->private_data = desc;
	return 0;
}
static int vol_cdev_release(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	dbg_gen("release device %d, volume %d, mode %d",
		vol->ubi->ubi_num, vol->vol_id, desc->mode);

	if (vol->updating) {
		ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged",
			 vol->vol_id);
		ubi_assert(!vol->changing_leb);
		vol->updating = 0;
		vfree(vol->upd_buf);
	} else if (vol->changing_leb) {
		dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
			vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
			vol->vol_id);
		vol->changing_leb = 0;
		vfree(vol->upd_buf);
	}

	ubi_close_volume(desc);
	return 0;
}
static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	if (vol->updating) {
		/* Update is in progress, seeking is prohibited */
		ubi_err(vol->ubi, "updating");
		return -EBUSY;
	}

	return fixed_size_llseek(file, offset, origin, vol->used_bytes);
}
static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_device *ubi = desc->vol->ubi;
	struct inode *inode = file_inode(file);
	int err;

	inode_lock(inode);
	err = ubi_sync(ubi->ubi_num);
	inode_unlock(inode);
	return err;
}
static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
			     loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int err, lnum, off, len, tbuf_size;
	size_t count_save = count;
	char *tbuf;

	dbg_gen("read %zd bytes from offset %lld of volume %d",
		count, *offp, vol->vol_id);

	if (vol->updating) {
		ubi_err(vol->ubi, "updating");
		return -EBUSY;
	}
	if (vol->upd_marker) {
		ubi_err(vol->ubi, "damaged volume, update marker is set");
		return -EBADF;
	}
	if (*offp == vol->used_bytes || count == 0)
		return 0;

	if (vol->corrupted)
		dbg_gen("read from corrupted volume %d", vol->vol_id);

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);

	do {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;

		err = copy_to_user(buf, tbuf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	} while (count);

	vfree(tbuf);
	return err ? err : count_save - count;
}
/*
 * This function allows direct writes to dynamic UBI volumes, without issuing
 * the volume update operation.
 */
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int lnum, off, len, tbuf_size, err = 0;
	size_t count_save = count;
	char *tbuf;

	if (!vol->direct_writes)
		return -EPERM;

	dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
		count, *offp, vol->vol_id);

	if (vol->vol_type == UBI_STATIC_VOLUME)
		return -EROFS;

	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	if (off & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "unaligned position");
		return -EINVAL;
	}

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* We can write only in fractions of the minimum I/O unit */
	if (count & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "unaligned write length");
		return -EINVAL;
	}

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;

	while (count) {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = copy_from_user(tbuf, buf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;
		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	}

	vfree(tbuf);
	return err ? err : count_save - count;
}
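/*
 * Direct writes are disabled by default and have to be switched on per volume
 * via the "set volume property" ioctl handled in vol_cdev_ioctl() below. A
 * minimal userspace sketch (illustrative only, error handling omitted; the
 * 2048-byte minimum I/O unit is an assumption for the example):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	int vol = open("/dev/ubi0_0", O_RDWR);
 *	struct ubi_set_vol_prop_req prop = {
 *		.property = UBI_VOL_PROP_DIRECT_WRITE,
 *		.value = 1,
 *	};
 *	ioctl(vol, UBI_IOCSETVOLPROP, &prop);
 *
 *	// Writes must now be min_io_size-aligned in both offset and length.
 *	char page[2048] = { 0 };
 *	pwrite(vol, page, sizeof(page), 0);
 */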
static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *offp)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;

	if (!vol->updating && !vol->changing_leb)
		return vol_cdev_direct_write(file, buf, count, offp);

	if (vol->updating)
		err = ubi_more_update_data(ubi, vol, buf, count);
	else
		err = ubi_more_leb_change_data(ubi, vol, buf, count);

	if (err < 0) {
		ubi_err(ubi, "cannot accept more %zd bytes of data, error %d",
			count, err);
		return err;
	}

	if (err) {
		/*
		 * The operation is finished, @err contains number of actually
		 * written bytes.
		 */
		count = err;

		if (vol->changing_leb) {
			revoke_exclusive(desc, UBI_READWRITE);
			return count;
		}

		/*
		 * We voluntarily do not take into account the skip_check flag
		 * as we want to make sure what we wrote was correctly written.
		 */
		err = ubi_check_volume(ubi, vol->vol_id);
		if (err < 0)
			return err;

		if (err) {
			ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
				 vol->vol_id, ubi->ubi_num);
			vol->corrupted = 1;
		}
		ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
		revoke_exclusive(desc, UBI_READWRITE);
	}

	return count;
}
static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	/* Volume update command */
	case UBI_IOCVOLUP:
	{
		int64_t bytes, rsvd_bytes;

		if (!capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			break;
		}

		err = copy_from_user(&bytes, argp, sizeof(int64_t));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY) {
			err = -EROFS;
			break;
		}

		rsvd_bytes = (long long)vol->reserved_pebs *
					vol->usable_leb_size;
		if (bytes < 0 || bytes > rsvd_bytes) {
			err = -EINVAL;
			break;
		}

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_update(ubi, vol, bytes);
		if (bytes == 0) {
			ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
			revoke_exclusive(desc, UBI_READWRITE);
		}
		break;
	}

	/* Atomic logical eraseblock change command */
	case UBI_IOCEBCH:
	{
		struct ubi_leb_change_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_leb_change_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		/* Validate the request */
		err = -EINVAL;
		if (!ubi_leb_valid(vol, req.lnum) ||
		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
			break;

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_leb_change(ubi, vol, &req);
		if (req.bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Logical eraseblock erasure command */
	case UBI_IOCEBER:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		if (!ubi_leb_valid(vol, lnum)) {
			err = -EINVAL;
			break;
		}

		dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			break;

		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
		break;
	}

	/* Logical eraseblock map command */
	case UBI_IOCEBMAP:
	{
		struct ubi_map_req req;

		err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_map(desc, req.lnum);
		break;
	}

	/* Logical eraseblock un-map command */
	case UBI_IOCEBUNMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_unmap(desc, lnum);
		break;
	}

	/* Check if logical eraseblock is mapped command */
	case UBI_IOCEBISMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_is_mapped(desc, lnum);
		break;
	}

	/* Set volume property command */
	case UBI_IOCSETVOLPROP:
	{
		struct ubi_set_vol_prop_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_set_vol_prop_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		switch (req.property) {
		case UBI_VOL_PROP_DIRECT_WRITE:
			mutex_lock(&ubi->device_mutex);
			desc->vol->direct_writes = !!req.value;
			mutex_unlock(&ubi->device_mutex);
			break;
		default:
			err = -EINVAL;
			break;
		}
		break;
	}

	/* Create a R/O block device on top of the UBI volume */
	case UBI_IOCVOLCRBLK:
	{
		struct ubi_volume_info vi;

		ubi_get_volume_info(desc, &vi);
		err = ubiblock_create(&vi);
		break;
	}

	/* Remove the R/O block device */
	case UBI_IOCVOLRMBLK:
	{
		struct ubi_volume_info vi;

		ubi_get_volume_info(desc, &vi);
		err = ubiblock_remove(&vi);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}
	return err;
}
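/*
 * The UBI_IOCVOLUP handler above only starts an update; the new contents are
 * then streamed through ordinary write()s and the update completes once the
 * announced number of bytes has been received (this is essentially what the
 * ubiupdatevol tool does). Illustrative userspace sketch, error handling
 * omitted; @image and @image_size are placeholders for the new volume
 * contents:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	int vol = open("/dev/ubi0_0", O_RDWR);
 *	int64_t bytes = image_size;		// total length of the new contents
 *	ioctl(vol, UBI_IOCVOLUP, &bytes);	// start the update
 *	for (off_t done = 0; done < image_size; )
 *		done += write(vol, image + done, image_size - done);
 *	close(vol);
 */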
/**
 * verify_mkvol_req - verify volume creation request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_mkvol_req(const struct ubi_device *ubi,
			    const struct ubi_mkvol_req *req)
{
	int n, err = -EINVAL;

	if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
	    req->name_len < 0)
		goto bad;

	if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
	    req->vol_id != UBI_VOL_NUM_AUTO)
		goto bad;

	if (req->alignment == 0)
		goto bad;

	if (req->bytes == 0)
		goto bad;

	if (req->vol_type != UBI_DYNAMIC_VOLUME &&
	    req->vol_type != UBI_STATIC_VOLUME)
		goto bad;

	if (req->flags & ~UBI_VOL_VALID_FLGS)
		goto bad;

	if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG &&
	    req->vol_type != UBI_STATIC_VOLUME)
		goto bad;

	if (req->alignment > ubi->leb_size)
		goto bad;

	n = req->alignment & (ubi->min_io_size - 1);
	if (req->alignment != 1 && n)
		goto bad;

	if (!req->name[0] || !req->name_len)
		goto bad;

	if (req->name_len > UBI_VOL_NAME_MAX) {
		err = -ENAMETOOLONG;
		goto bad;
	}

	n = strnlen(req->name, req->name_len + 1);
	if (n != req->name_len)
		goto bad;

	return 0;

bad:
	ubi_err(ubi, "bad volume creation request");
	ubi_dump_mkvol_req(req);
	return err;
}
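/*
 * For reference, a request that passes the checks above could be built like
 * this in userspace (illustrative sketch only; the request is issued against
 * the UBI character device, e.g. /dev/ubi0, via UBI_IOCMKVOL):
 *
 *	#include <string.h>
 *	#include <mtd/ubi-user.h>
 *
 *	struct ubi_mkvol_req req = {
 *		.vol_id = UBI_VOL_NUM_AUTO,	// let UBI pick a volume ID
 *		.alignment = 1,			// no extra LEB alignment
 *		.bytes = 1024 * 1024,		// 1 MiB volume
 *		.vol_type = UBI_DYNAMIC_VOLUME,
 *	};
 *	strcpy(req.name, "config");
 *	req.name_len = strlen(req.name);
 */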
/**
 * verify_rsvol_req - verify volume re-size request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_rsvol_req(const struct ubi_device *ubi,
			    const struct ubi_rsvol_req *req)
{
	if (req->bytes <= 0)
		return -EINVAL;

	if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
		return -EINVAL;

	return 0;
}
/**
 * rename_volumes - rename UBI volumes.
 * @ubi: UBI device description object
 * @req: volumes re-name request
 *
 * This is a helper function for the volume re-name IOCTL which validates the
 * request, opens the volume and calls the corresponding volumes management
 * function. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int rename_volumes(struct ubi_device *ubi,
			  struct ubi_rnvol_req *req)
{
	int i, n, err;
	struct list_head rename_list;
	struct ubi_rename_entry *re, *re1;

	if (req->count < 0 || req->count > UBI_MAX_RNVOL)
		return -EINVAL;

	if (req->count == 0)
		return 0;

	/* Validate volume IDs and names in the request */
	for (i = 0; i < req->count; i++) {
		if (req->ents[i].vol_id < 0 ||
		    req->ents[i].vol_id >= ubi->vtbl_slots)
			return -EINVAL;
		if (req->ents[i].name_len < 0)
			return -EINVAL;
		if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
			return -ENAMETOOLONG;
		req->ents[i].name[req->ents[i].name_len] = '\0';
		n = strlen(req->ents[i].name);
		if (n != req->ents[i].name_len)
			return -EINVAL;
	}

	/* Make sure volume IDs and names are unique */
	for (i = 0; i < req->count - 1; i++) {
		for (n = i + 1; n < req->count; n++) {
			if (req->ents[i].vol_id == req->ents[n].vol_id) {
				ubi_err(ubi, "duplicated volume id %d",
					req->ents[i].vol_id);
				return -EINVAL;
			}
			if (!strcmp(req->ents[i].name, req->ents[n].name)) {
				ubi_err(ubi, "duplicated volume name \"%s\"",
					req->ents[i].name);
				return -EINVAL;
			}
		}
	}

	/* Create the re-name list */
	INIT_LIST_HEAD(&rename_list);
	for (i = 0; i < req->count; i++) {
		int vol_id = req->ents[i].vol_id;
		int name_len = req->ents[i].name_len;
		const char *name = req->ents[i].name;

		re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re) {
			err = -ENOMEM;
			goto out_free;
		}

		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
		if (IS_ERR(re->desc)) {
			err = PTR_ERR(re->desc);
			ubi_err(ubi, "cannot open volume %d, error %d",
				vol_id, err);
			kfree(re);
			goto out_free;
		}

		/* Skip this re-naming if the name does not really change */
		if (re->desc->vol->name_len == name_len &&
		    !memcmp(re->desc->vol->name, name, name_len)) {
			ubi_close_volume(re->desc);
			kfree(re);
			continue;
		}

		re->new_name_len = name_len;
		memcpy(re->new_name, name, name_len);
		list_add_tail(&re->list, &rename_list);
		dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
			vol_id, re->desc->vol->name, name);
	}

	if (list_empty(&rename_list))
		return 0;

	/* Find out the volumes which have to be removed */
	list_for_each_entry(re, &rename_list, list) {
		struct ubi_volume_desc *desc;
		int no_remove_needed = 0;

		/*
		 * Volume @re->vol_id is going to be re-named to
		 * @re->new_name, while its current name is @name. If a volume
		 * with name @re->new_name currently exists, it has to be
		 * removed, unless it is also re-named in the request (@req).
		 */
		list_for_each_entry(re1, &rename_list, list) {
			if (re->new_name_len == re1->desc->vol->name_len &&
			    !memcmp(re->new_name, re1->desc->vol->name,
				    re1->desc->vol->name_len)) {
				no_remove_needed = 1;
				break;
			}
		}

		if (no_remove_needed)
			continue;

		/*
		 * It seems we need to remove volume with name @re->new_name,
		 * if it exists.
		 */
		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
					  UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			if (err == -ENODEV)
				/* Re-naming into a non-existing volume name */
				continue;

			/* The volume exists but busy, or an error occurred */
			ubi_err(ubi, "cannot open volume \"%s\", error %d",
				re->new_name, err);
			goto out_free;
		}

		re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re1) {
			err = -ENOMEM;
			ubi_close_volume(desc);
			goto out_free;
		}

		re1->remove = 1;
		re1->desc = desc;
		list_add(&re1->list, &rename_list);
		dbg_gen("will remove volume %d, name \"%s\"",
			re1->desc->vol->vol_id, re1->desc->vol->name);
	}

	mutex_lock(&ubi->device_mutex);
	err = ubi_rename_volumes(ubi, &rename_list);
	mutex_unlock(&ubi->device_mutex);

out_free:
	list_for_each_entry_safe(re, re1, &rename_list, list) {
		ubi_close_volume(re->desc);
		list_del(&re->list);
		kfree(re);
	}
	return err;
}
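/*
 * The request consumed by rename_volumes() is built in userspace roughly as
 * follows (illustrative sketch; @ubi_fd is a placeholder for an open file
 * descriptor on the UBI character device, e.g. /dev/ubi0, and the same ioctl
 * may carry several entries, which is how volume names can be swapped
 * atomically):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	struct ubi_rnvol_req req = { .count = 1 };
 *	req.ents[0].vol_id = 0;				// rename volume 0 ...
 *	strcpy(req.ents[0].name, "rootfs_new");		// ... to "rootfs_new"
 *	req.ents[0].name_len = strlen(req.ents[0].name);
 *	ioctl(ubi_fd, UBI_IOCRNVOL, &req);
 */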
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_device *ubi;
	struct ubi_volume_desc *desc;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	ubi = ubi_get_by_major(imajor(file->f_mapping->host));
	if (!ubi)
		return -ENODEV;

	switch (cmd) {
	/* Create volume command */
	case UBI_IOCMKVOL:
	{
		struct ubi_mkvol_req req;

		dbg_gen("create volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_mkvol_req(ubi, &req);
		if (err)
			break;

		mutex_lock(&ubi->device_mutex);
		err = ubi_create_volume(ubi, &req);
		mutex_unlock(&ubi->device_mutex);
		if (err)
			break;

		err = put_user(req.vol_id, (__user int32_t *)argp);
		if (err)
			err = -EFAULT;

		break;
	}

	/* Remove volume command */
	case UBI_IOCRMVOL:
	{
		int vol_id;

		dbg_gen("remove volume");
		err = get_user(vol_id, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		mutex_lock(&ubi->device_mutex);
		err = ubi_remove_volume(desc, 0);
		mutex_unlock(&ubi->device_mutex);

		/*
		 * The volume is deleted (unless an error occurred), and the
		 * 'struct ubi_volume' object will be freed when
		 * 'ubi_close_volume()' calls 'put_device()'.
		 */
		ubi_close_volume(desc);
		break;
	}

	/* Re-size volume command */
	case UBI_IOCRSVOL:
	{
		int pebs;
		struct ubi_rsvol_req req;

		dbg_gen("re-size volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_rsvol_req(ubi, &req);
		if (err)
			break;

		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
			       desc->vol->usable_leb_size);

		mutex_lock(&ubi->device_mutex);
		err = ubi_resize_volume(desc, pebs);
		mutex_unlock(&ubi->device_mutex);
		ubi_close_volume(desc);
		break;
	}

	/* Re-name volumes command */
	case UBI_IOCRNVOL:
	{
		struct ubi_rnvol_req *req;

		dbg_gen("re-name volumes");
		req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			break;
		}

		err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
		if (err) {
			err = -EFAULT;
			kfree(req);
			break;
		}

		err = rename_volumes(ubi, req);
		kfree(req);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	ubi_put_device(ubi);
	return err;
}
static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	switch (cmd) {
	/* Attach an MTD device command */
	case UBI_IOCATT:
	{
		struct ubi_attach_req req;
		struct mtd_info *mtd;

		dbg_gen("attach MTD device");
		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (req.mtd_num < 0 ||
		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
			err = -EINVAL;
			break;
		}

		mtd = get_mtd_device(NULL, req.mtd_num);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			break;
		}

		/*
		 * Note, further request verification is done by
		 * 'ubi_attach_mtd_dev()'.
		 */
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
					 req.max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0)
			put_mtd_device(mtd);
		else
			/* @err contains UBI device number */
			err = put_user(err, (__user int32_t *)argp);

		break;
	}

	/* Detach an MTD device command */
	case UBI_IOCDET:
	{
		int ubi_num;

		dbg_gen("detach MTD device");
		err = get_user(ubi_num, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_detach_mtd_dev(ubi_num, 0);
		mutex_unlock(&ubi_devices_mutex);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
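/*
 * Detaching is the mirror image of the attach example near the top of this
 * file; with a zero "anyway" argument, as passed above, the detach fails with
 * -EBUSY while the UBI device is still in use. Illustrative userspace sketch
 * (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	int ctrl = open("/dev/ubi_ctrl", O_RDONLY);
 *	int32_t ubi_num = 0;			// detach UBI device 0
 *	ioctl(ctrl, UBI_IOCDET, &ubi_num);
 */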
#ifdef CONFIG_COMPAT
static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return vol_cdev_ioctl(file, cmd, translated_arg);
}

static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ubi_cdev_ioctl(file, cmd, translated_arg);
}

static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ctrl_cdev_ioctl(file, cmd, translated_arg);
}
#else
#define vol_cdev_compat_ioctl NULL
#define ubi_cdev_compat_ioctl NULL
#define ctrl_cdev_compat_ioctl NULL
#endif
/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
	.owner          = THIS_MODULE,
	.open           = vol_cdev_open,
	.release        = vol_cdev_release,
	.llseek         = vol_cdev_llseek,
	.read           = vol_cdev_read,
	.write          = vol_cdev_write,
	.fsync          = vol_cdev_fsync,
	.unlocked_ioctl = vol_cdev_ioctl,
	.compat_ioctl   = vol_cdev_compat_ioctl,
};

/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.unlocked_ioctl = ubi_cdev_ioctl,
	.compat_ioctl   = ubi_cdev_compat_ioctl,
};

/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ctrl_cdev_ioctl,
	.compat_ioctl   = ctrl_cdev_compat_ioctl,
	.llseek         = no_llseek,
};