/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
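/*
 * Illustrative sketch, not part of this driver: from userspace a raw minor
 * is bound through the control node with the RAW_SETBIND ioctl and a
 * struct raw_config_request (both from <linux/raw.h>); a full program would
 * also need <fcntl.h> and <sys/ioctl.h>.  The device numbers below (8:1,
 * i.e. a first SCSI-disk partition) are example values only.
 *
 *	int ctl = open("/dev/rawctl", O_RDWR);
 *	struct raw_config_request rq = {
 *		.raw_minor   = 1,
 *		.block_major = 8,
 *		.block_minor = 1,
 *	};
 *	ioctl(ctl, RAW_SETBIND, &rq);
 */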
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>
struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
static struct raw_device_data *raw_devices;
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

static int max_raw_minors = MAX_RAW_MINORS;

module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
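/*
 * Illustrative sketch, not part of this driver: because the bound node
 * behaves as if it were opened with O_DIRECT, userspace buffers, offsets
 * and transfer sizes should be aligned to the device's logical block size.
 * The 512-byte alignment and the example path below are assumptions.
 *
 *	void *buf;
 *	posix_memalign(&buf, 512, 4096);
 *	int fd = open("/dev/raw/raw1", O_RDONLY);
 *	pread(fd, buf, 4096, 0);
 */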
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	mutex_lock(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;

	bdgrab(bdev);
	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
	if (err)
		goto out;
	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
	if (err)
		goto out1;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		file_inode(filp)->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	return 0;

out1:
	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
out:
	mutex_unlock(&raw_mutex);
	return err;
}
/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0)
		/* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
		inode->i_mapping = &inode->i_data;
	mutex_unlock(&raw_mutex);

	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
	return 0;
}
/*
 * Forward ioctls to the underlying block device.
 */
static long
raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
{
	struct block_device *bdev = filp->private_data;

	return blkdev_ioctl(bdev, 0, command, arg);
}
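/*
 * Illustrative sketch, not part of this driver: because ioctls are passed
 * straight through to the block layer, standard block ioctls such as
 * BLKGETSIZE64 (<linux/fs.h>) work on a bound raw node.  The path below is
 * an example value.
 *
 *	unsigned long long size;
 *	int fd = open("/dev/raw/raw1", O_RDONLY);
 *	ioctl(fd, BLKGETSIZE64, &size);
 */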
static int bind_set(int number, u64 major, u64 minor)
{
	dev_t dev = MKDEV(major, minor);
	struct raw_device_data *rawdev;
	int err = 0;

	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;

	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -EINVAL;

	rawdev = &raw_devices[number];

	/*
	 * This is like making block devices, so demand the
	 * same capability
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * For now, we don't need to check that the underlying
	 * block device is present or not: we can do that when
	 * the raw device is opened.  Just check that the
	 * major/minor numbers make sense.
	 */
	if (MAJOR(dev) == 0 && dev != 0)
		return -EINVAL;

	mutex_lock(&raw_mutex);
	if (rawdev->inuse) {
		mutex_unlock(&raw_mutex);
		return -EBUSY;
	}
	if (rawdev->binding) {
		bdput(rawdev->binding);
		module_put(THIS_MODULE);
	}
	if (!dev) {
		/* unbind */
		rawdev->binding = NULL;
		device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
	} else {
		rawdev->binding = bdget(dev);
		if (rawdev->binding == NULL) {
			err = -ENOMEM;
		} else {
			dev_t raw = MKDEV(RAW_MAJOR, number);
			__module_get(THIS_MODULE);
			device_destroy(raw_class, raw);
			device_create(raw_class, NULL, raw, NULL,
				      "raw%d", number);
		}
	}
	mutex_unlock(&raw_mutex);
	return err;
}
static int bind_get(int number, dev_t *dev)
{
	struct raw_device_data *rawdev;
	struct block_device *bdev;

	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;

	rawdev = &raw_devices[number];

	mutex_lock(&raw_mutex);
	bdev = rawdev->binding;
	*dev = bdev ? bdev->bd_dev : 0;
	mutex_unlock(&raw_mutex);
	return 0;
}
/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static long raw_ctl_ioctl(struct file *filp, unsigned int command,
			  unsigned long arg)
{
	struct raw_config_request rq;
	dev_t dev;
	int err;

	switch (command) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}
#ifdef CONFIG_COMPAT
struct raw32_config_request {
	compat_int_t	raw_minor;
	compat_u64	block_major;
	compat_u64	block_minor;
};

static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct raw32_config_request __user *user_req = compat_ptr(arg);
	struct raw32_config_request rq;
	dev_t dev;
	int err;

	switch (cmd) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user(user_req, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}
#endif
static const struct file_operations raw_fops = {
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.fsync		= blkdev_fsync,
	.open		= raw_open,
	.release	= raw_release,
	.unlocked_ioctl = raw_ioctl,
	.llseek		= default_llseek,
	.owner		= THIS_MODULE,
};

static const struct file_operations raw_ctl_fops = {
	.unlocked_ioctl = raw_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= raw_ctl_compat_ioctl,
#endif
	.open		= raw_open,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct cdev raw_cdev;
static char *raw_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;

	if (max_raw_minors < 1 || max_raw_minors > 65536) {
		printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
			" between 1 and 65536), using %d\n", MAX_RAW_MINORS);
		max_raw_minors = MAX_RAW_MINORS;
	}

	raw_devices = vzalloc(array_size(max_raw_minors,
				sizeof(struct raw_device_data)));
	if (!raw_devices) {
		printk(KERN_ERR "Not enough memory for raw device structures\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = register_chrdev_region(dev, max_raw_minors, "raw");
	if (ret)
		goto error;

	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
	if (ret)
		goto error_region;
	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}
	raw_class->devnode = raw_devnode;
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");

	return 0;

error_region:
	unregister_chrdev_region(dev, max_raw_minors);
error:
	vfree(raw_devices);
	return ret;
}
static void __exit raw_exit(void)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
	class_destroy(raw_class);
	cdev_del(&raw_cdev);
	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
}

module_init(raw_init);
module_exit(raw_exit);
MODULE_LICENSE("GPL");