/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
struct nbd_device {
	u32 flags;
	struct socket *sock;	/* If == NULL, device is not ready, yet	*/
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests waiting result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	int xmit_timeout;
	bool timedout;
	bool disconnect; /* a disconnect has been requested by user */

	struct timer_list timeout_timer;
	/* protects initialization and shutdown of the socket */
	spinlock_t sock_lock;
	struct task_struct *task_recv;
	struct task_struct *task_send;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bdev->bd_inode->i_size = 0;
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}
static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	if (!nbd_is_connected(nbd))
		return;

	bdev->bd_inode->i_size = nbd->bytesize;
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			int blocksize, int nr_blocks)
{
	int ret;

	ret = set_blocksize(bdev, blocksize);
	if (ret)
		return ret;

	nbd->blksize = blocksize;
	nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;

	nbd_size_update(nbd, bdev);

	return 0;
}
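
/*
 * Worked example of the size arithmetic above: NBD_SET_SIZE_BLOCKS with a
 * 4096-byte block size and nr_blocks = 1024 gives bytesize = 4096 * 1024 =
 * 4 MiB, which nbd_size_update() advertises as bytesize >> 9 = 8192
 * 512-byte sectors.
 */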
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	spin_lock_irq(&nbd->sock_lock);

	if (!nbd->sock) {
		spin_unlock_irq(&nbd->sock_lock);
		return;
	}

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	sockfd_put(nbd->sock);
	nbd->sock = NULL;
	spin_unlock_irq(&nbd->sock_lock);

	del_timer(&nbd->timeout_timer);
}
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	unsigned long flags;

	if (list_empty(&nbd->queue_head))
		return;

	spin_lock_irqsave(&nbd->sock_lock, flags);

	nbd->timedout = true;

	if (nbd->sock)
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);

	spin_unlock_irqrestore(&nbd->sock_lock, flags);

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		     int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	if (!send && nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	return result;
}
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
				 int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			   (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (err)
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			   MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			return ERR_PTR(result);

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		return ERR_PTR(-EBADR);
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};
static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);

	nbd->task_recv = current;

	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");

		nbd->task_recv = NULL;

		return ret;
	}

	nbd_size_update(nbd, bdev);

	while (1) {
		req = nbd_read_stat(nbd);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		nbd_end_request(nbd, req);
	}

	nbd_size_clear(nbd, bdev);

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);

	nbd->task_recv = NULL;

	return ret;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}
static int nbd_thread_send(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	nbd->task_send = current;

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}

	nbd->task_send = NULL;

	return 0;
}
/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

static void nbd_request_handler(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
{
	int ret = 0;

	spin_lock_irq(&nbd->sock_lock);

	if (nbd->sock)
		ret = -EBUSY;
	else
		nbd->sock = sock;

	spin_unlock_irq(&nbd->sock_lock);

	return ret;
}
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->disconnect = false;
	nbd->timedout = false;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->xmit_timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	del_timer_sync(&nbd->timeout_timer);
}
static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}
static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back. */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = true;

		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		return 0;

	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_set_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}

	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		return nbd_size_set(nbd, bdev, arg, bsize);
	}

	case NBD_SET_SIZE:
		return nbd_size_set(nbd, bdev, nbd->blksize,
				    arg / nbd->blksize);

	case NBD_SET_SIZE_BLOCKS:
		return nbd_size_set(nbd, bdev, nbd->blksize, arg);

	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		if (arg)
			mod_timer(&nbd->timeout_timer,
				  jiffies + nbd->xmit_timeout);
		else
			del_timer_sync(&nbd->timeout_timer);

		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		int error;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		nbd_parse_flags(nbd, bdev);

		thread = kthread_run(nbd_thread_send, nbd, "%s",
				     nbd_name(nbd));
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}

		nbd_dev_dbg_init(nbd);
		error = nbd_thread_recv(nbd, bdev);
		nbd_dev_dbg_close(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);

		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);

		if (nbd->disconnect) /* user requested, ignore socket errors */
			error = 0;
		if (nbd->timedout)
			error = -ETIMEDOUT;

		nbd_reset(nbd);

		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}
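
/*
 * A minimal userspace sketch of driving this ioctl interface (error
 * handling omitted; connect_to_server() is a hypothetical helper that
 * returns a TCP socket already connected to an NBD server). NBD_DO_IT
 * blocks until the connection is torn down:
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	int sock = connect_to_server();        // hypothetical helper
 *
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_SET_SOCK, sock);
 *	ioctl(dev, NBD_DO_IT);                 // blocks until disconnect
 *
 * In practice nbd-client(8) performs this handshake.
 */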
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
	if (nbd->task_send)
		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}
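
/*
 * With CONFIG_DEBUG_FS and debugfs mounted, each connected device gets a
 * directory such as /sys/kernel/debug/nbd/nbd0 containing the read-only
 * files created above: tasks, size_bytes, timeout, blocksize and flags.
 */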
static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}
#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
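
	/*
	 * Worked example: loading with max_part=15 gives part_shift =
	 * fls(15) = 4, so max_part is rounded to (1 << 4) - 1 = 15 and
	 * each disk consumes 1 << 4 = 16 minors (partition 0 being the
	 * whole disk), i.e. nbd0 starts at minor 0 and nbd1 at minor 16.
	 */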
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		spin_lock_init(&nbd_dev[i].sock_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_timer(&nbd_dev[i].timeout_timer);
		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");