/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
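/*
 * These are bit numbers in the runtime_flags word below; they are only ever
 * manipulated atomically.  Illustrative usage, taken from the code further
 * down in this file:
 *
 *	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
 *	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
 *		return;
 */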
	unsigned long runtime_flags;

	struct nbd_sock **socks;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif

	struct nbd_device *nbd;
	struct completion send_complete;

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)
#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct nbd_device *nbd_dev;

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bdev->bd_inode->i_size = 0;
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}
static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	if (!nbd_is_connected(nbd))
		return;

	bdev->bd_inode->i_size = nbd->bytesize;
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			loff_t blocksize, loff_t nr_blocks)
{
	int ret;

	ret = set_blocksize(bdev, blocksize);
	if (ret)
		return ret;

	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;

	nbd_size_update(nbd, bdev);

	return 0;
}
static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);

	/*
	 * If our disconnect packet times out then we're already holding the
	 * config_lock and could deadlock here, so just set an error and
	 * return; we'll handle shutting everything down later.
	 */
	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		return BLK_EH_HANDLED;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}
/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
		     int size, int msg_flags)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
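	/*
	 * The socket may be carrying writeback for this very block device,
	 * so transmissions must not recurse into filesystem or swap I/O.
	 * Setting PF_MEMALLOC on the task and GFP_NOIO | __GFP_MEMALLOC on
	 * the socket (done just above) lets the sends and receives dip into
	 * memory reserves instead of blocking on reclaim.
	 */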
	iov.iov_base = buf;
	iov.iov_len = size;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = msg_flags | MSG_NOSIGNAL;

	if (send)
		result = kernel_sendmsg(sock, &msg, &iov, 1, size);
	else
		result = kernel_recvmsg(sock, &msg, &iov, 1, size,
					msg.msg_flags);

	if (result == 0)
		result = -EPIPE; /* short read */

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}
static inline int sock_send_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec, int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));
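	/*
	 * The header just built is the fixed-size NBD request that goes on
	 * the wire: a 32-bit magic, a 32-bit command type, an 8-byte opaque
	 * handle, a 64-bit byte offset and a 32-bit length, all big-endian
	 * (28 bytes total, see the BUILD_BUG_ON() in nbd_init()).  The
	 * 32-bit blk-mq unique tag is stashed in the handle so that
	 * nbd_read_stat() can map the server's reply back to the request
	 * that produced it.
	 */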

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}
static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
			   bvec->bv_len, MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;

	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
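	/*
	 * blk_mq_unique_tag() encodes the hardware-queue number in the upper
	 * bits and the per-queue tag in the low 16 bits, so the two helpers
	 * above recover both halves from the handle the server echoed back.
	 */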
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, index, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};
struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error, shut everybody down if this wasn't the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		goto error_out;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (req->cmd_type != REQ_TYPE_FS &&
	    req->cmd_type != REQ_TYPE_DRV_PRIV)
		goto error_out;

	if (req->cmd_type == REQ_TYPE_FS &&
	    rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		goto error_out;
	}

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd, index) != 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	mutex_unlock(&nsock->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}
static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send.  This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return BLK_MQ_RQ_QUEUE_OK;
}
static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
	struct nbd_sock **socks;
	struct nbd_sock *nsock;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	socks[nbd->num_connections++] = nsock;

	return 0;
}
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	int i;

	for (i = 0; i < nbd->num_connections; i++)
		kfree(nbd->socks[i]);
	kfree(nbd->socks);
	nbd->socks = NULL;
	nbd->runtime_flags = 0;
	set_capacity(nbd->disk, 0);
	nbd->tag_set.timeout = 0;
	nbd->num_connections = 0;
	nbd->task_setup = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}
static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {};
	int i, ret;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(NBD_CMD_DISC);

	for (i = 0; i < nbd->num_connections; i++) {
		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
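/*
 * Rough sketch of how a userspace client such as nbd-client is expected to
 * drive the ioctls handled below (illustrative only, not taken verbatim from
 * any particular tool):
 *
 *	fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(fd, NBD_SET_SOCK, sock_fd);	(a connected TCP socket)
 *	ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(fd, NBD_DO_IT);			(blocks until disconnect)
 *
 * NBD_DISCONNECT and NBD_CLEAR_SOCK are then used to tear the device down.
 */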
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->socks)
			return -EINVAL;

		mutex_unlock(&nbd->config_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->config_lock);

		/* Check again after getting mutex back. */
		if (!nbd->socks)
			return -EINVAL;

		if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
				      &nbd->runtime_flags))
			send_disconnects(nbd);
		return 0;
	}

	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);
		/*
		 * We want to give the run thread a chance to wait for everybody
		 * to clean up and then do its own cleanup.
		 */
		if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
			int i;

			for (i = 0; i < nbd->num_connections; i++)
				kfree(nbd->socks[i]);
			kfree(nbd->socks);
			nbd->socks = NULL;
			nbd->num_connections = 0;
			nbd->task_setup = NULL;
		}
		return 0;

	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_add_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}

	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		return nbd_size_set(nbd, bdev, arg, bsize);
	}

	case NBD_SET_SIZE:
		return nbd_size_set(nbd, bdev, nbd->blksize,
				    div_s64(arg, nbd->blksize));

	case NBD_SET_SIZE_BLOCKS:
		return nbd_size_set(nbd, bdev, nbd->blksize, arg);

	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
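		/*
		 * The ioctl argument is a timeout in seconds; blk-mq keeps
		 * request timeouts in jiffies, hence the HZ scaling above.
		 */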
		return 0;

	case NBD_DO_IT: {
		struct recv_thread_args *args;
		int num_connections = nbd->num_connections;
		int error = 0, i;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->socks)
			return -EINVAL;
		if (num_connections > 1 &&
		    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
			dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
			error = -EINVAL;
			goto out_err;
		}

		set_bit(NBD_RUNNING, &nbd->runtime_flags);
		blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
		args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
		if (!args) {
			error = -ENOMEM;
			goto out_err;
		}
		nbd->task_recv = current;
		mutex_unlock(&nbd->config_lock);

		nbd_parse_flags(nbd, bdev);

		error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
		if (error) {
			dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
			goto out_recv;
		}

		nbd_size_update(nbd, bdev);

		nbd_dev_dbg_init(nbd);
		for (i = 0; i < num_connections; i++) {
			sk_set_memalloc(nbd->socks[i]->sock->sk);
			atomic_inc(&nbd->recv_threads);
			INIT_WORK(&args[i].work, recv_work);
			args[i].nbd = nbd;
			args[i].index = i;
			queue_work(system_long_wq, &args[i].work);
		}
		wait_event_interruptible(nbd->recv_wq,
					 atomic_read(&nbd->recv_threads) == 0);
		for (i = 0; i < num_connections; i++)
			flush_work(&args[i].work);
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd, bdev);
		device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
		mutex_lock(&nbd->config_lock);
		nbd->task_recv = NULL;
out_err:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		nbd_bdev_reset(bdev);

		/* user requested, ignore socket errors */
		if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			error = 0;
		if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
			error = -ETIMEDOUT;

		nbd_reset(nbd);
		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}
#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = data;
	return 0;
}
static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user space can know the max number
		 * of partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
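	/*
	 * Worked example (illustrative): loading with max_part=15 gives
	 * part_shift = fls(15) = 4, so max_part becomes (1 << 4) - 1 = 15
	 * and each nbd device consumes 16 minors: minor 0 for the whole
	 * disk plus minors 1..15 for its partitions.
	 */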

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;

		nbd_dev[i].tag_set.ops = &nbd_mq_ops;
		nbd_dev[i].tag_set.nr_hw_queues = 1;
		nbd_dev[i].tag_set.queue_depth = 128;
		nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
		nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
		nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
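		/*
		 * BLK_MQ_F_BLOCKING is needed because nbd_queue_rq() may
		 * sleep: it takes the per-socket tx_lock mutex and performs
		 * blocking socket sends while transmitting the request.
		 */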
		nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

		err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
		if (err) {
			put_disk(disk);
			goto out;
		}

		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
		if (IS_ERR(disk->queue)) {
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
			goto out;
		}

		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		mutex_init(&nbd_dev[i].config_lock);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		init_waitqueue_head(&nbd_dev[i].recv_wq);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_mq_free_tag_set(&nbd_dev[i].tag_set);
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");