/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
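/*
 * Userspace drives a bsg node with struct sg_io_v4, either queuing
 * headers with write() and reaping them with read(), or issuing one
 * synchronous command via the SG_IO ioctl.  A minimal sketch follows
 * (illustrative only: the device path is an example and all error
 * handling is omitted):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *	#include <linux/bsg.h>
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };	// INQUIRY
 *	unsigned char din[36], sense[32];
 *	struct sg_io_v4 hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (unsigned long) cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.din_xferp = (unsigned long) din;
 *	hdr.din_xfer_len = sizeof(din);
 *	hdr.response = (unsigned long) sense;
 *	hdr.max_response_len = sizeof(sense);
 *	hdr.timeout = 10000;			// milliseconds
 *
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);	// example path
 *	ioctl(fd, SG_IO, &hdr);
 *	// hdr.device_status, hdr.response_len, hdr.din_resid now valid
 */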
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"
struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};
enum {
	BSG_F_BLOCK	= 0,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif
static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;
/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
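/*
 * Return a command to the slab cache, release its slot in the
 * per-device queue, and wake anyone polling for a free slot (wq_free).
 */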
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
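/*
 * Allocate a command if the per-device queue depth (max_queue) allows
 * it: returns ERR_PTR(-EINVAL) when the queue is full and
 * ERR_PTR(-ENOMEM) when the slab allocation fails.
 */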
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
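/*
 * Wait for one in-flight command to complete; returns -ENODATA when
 * nothing is queued and -EAGAIN for non-blocking opens.
 */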
static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}
/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void __user *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then the bsg_unregister_queue will
	 * eventually be called and the class_dev was freed, so we can no
	 * longer use this request_queue. Return no such address.
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (IS_ERR(rq))
		return rq;
	blk_rq_set_block_pc(rq);

	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			next_rq = NULL;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}
/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
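/*
 * Pop the oldest completed command off the done list, or return NULL
 * if nothing has finished yet.
 */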
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}
/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = rq->errors & 0xff;
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}
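/*
 * Reap every outstanding command; used on final close so that no
 * requests are left referencing the device once it is torn down.
 */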
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
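/*
 * Copy completed sg_io_v4 headers back to userspace, one per
 * sizeof(struct sg_io_v4) chunk of the read buffer.
 */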
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}
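/*
 * Mirror O_NONBLOCK from the file into the device's BSG_F_BLOCK flag.
 */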
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}
/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EINVAL;

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}
static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}
static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}
static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}
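/*
 * POLLIN/POLLRDNORM when a completed command can be read, POLLOUT when
 * there is room to queue another one.
 */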
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
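/*
 * Handles the bsg-private queue-depth ioctls, forwards the classic
 * SCSI ioctls to scsi_cmd_ioctl(), and implements synchronous SG_IO.
 */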
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
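/*
 * A transport driver typically brackets the lifetime of its
 * request_queue with the register/unregister pair.  A minimal sketch,
 * with hypothetical driver names ("my_*") and assuming "q" is a fully
 * initialized, rq-based queue owned by "dev":
 *
 *	static void my_bsg_release(struct device *dev)
 *	{
 *		// last bsg reference is gone; free driver state here
 *	}
 *
 *	ret = bsg_register_queue(q, dev, "my_transport0", my_bsg_release);
 *	...
 *	bsg_unregister_queue(q);	// on teardown
 */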
static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}
MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);
);