// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/module.h>
#include <linux/blkdev.h>

#include "rnbd-srv.h"
#include "rnbd-srv-dev.h"

MODULE_DESCRIPTION("RDMA Network Block Device Server");
MODULE_LICENSE("GPL");
static u16 port_nr = RTRS_PORT;

module_param_named(port_nr, port_nr, ushort, 0444);
MODULE_PARM_DESC(port_nr,
		 "The port number the server is listening on (default: "
		 __stringify(RTRS_PORT) ")");
#define DEFAULT_DEV_SEARCH_PATH "/"

static char dev_search_path[PATH_MAX] = DEFAULT_DEV_SEARCH_PATH;

static int dev_search_path_set(const char *val, const struct kernel_param *kp)
{
	const char *p = strrchr(val, '\n') ? : val + strlen(val);

	if (strlen(val) >= sizeof(dev_search_path))
		return -EINVAL;

	snprintf(dev_search_path, sizeof(dev_search_path), "%.*s",
		 (int)(p - val), val);

	pr_info("dev_search_path changed to '%s'\n", dev_search_path);

	return 0;
}
static struct kparam_string dev_search_path_kparam_str = {
	.maxlen	= sizeof(dev_search_path),
	.string	= dev_search_path
};

static const struct kernel_param_ops dev_search_path_ops = {
	.set	= dev_search_path_set,
	.get	= param_get_string,
};

module_param_cb(dev_search_path, &dev_search_path_ops,
		&dev_search_path_kparam_str, 0444);
MODULE_PARM_DESC(dev_search_path,
		 "Sets the dev_search_path. When a device is mapped, this path is prepended to the device path from the map device operation. If %SESSNAME% is specified in the path, the device is searched for in a per-session namespace. (default: "
		 DEFAULT_DEV_SEARCH_PATH ")");
static DEFINE_MUTEX(sess_lock);
static DEFINE_SPINLOCK(dev_lock);

static LIST_HEAD(sess_list);
static LIST_HEAD(dev_list);
struct rnbd_io_private {
	struct rtrs_srv_op *id;
	struct rnbd_srv_sess_dev *sess_dev;
};
static void rnbd_sess_dev_release(struct kref *kref)
{
	struct rnbd_srv_sess_dev *sess_dev;

	sess_dev = container_of(kref, struct rnbd_srv_sess_dev, kref);
	complete(sess_dev->destroy_comp);
}
static inline void rnbd_put_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
{
	kref_put(&sess_dev->kref, rnbd_sess_dev_release);
}
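/*
 * Completion path for a client I/O: drop the sess_dev reference taken in
 * process_rdma(), report the result back to the client over RTRS and free
 * the per-request context.
 */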
void rnbd_endio(void *priv, int error)
{
	struct rnbd_io_private *rnbd_priv = priv;
	struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;

	rnbd_put_sess_dev(sess_dev);

	rtrs_srv_resp_rdma(rnbd_priv->id, error);

	kfree(priv);
}
static struct rnbd_srv_sess_dev *
rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
{
	struct rnbd_srv_sess_dev *sess_dev;
	int ret = 0;

	rcu_read_lock();
	sess_dev = xa_load(&srv_sess->index_idr, dev_id);
	if (likely(sess_dev))
		ret = kref_get_unless_zero(&sess_dev->kref);
	rcu_read_unlock();

	if (!sess_dev || !ret)
		return ERR_PTR(-ENXIO);

	return sess_dev;
}
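/*
 * Handle an RNBD_MSG_IO request: look up the session device, map the RDMA
 * data buffer into a bio and submit it to the backing block device.  The
 * RTRS response is sent later from the bio completion path (rnbd_endio()),
 * not from this function.
 */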
static int process_rdma(struct rtrs_srv *sess,
			struct rnbd_srv_session *srv_sess,
			struct rtrs_srv_op *id, void *data, u32 datalen,
			const void *usr, size_t usrlen)
{
	const struct rnbd_msg_io *msg = usr;
	struct rnbd_io_private *priv;
	struct rnbd_srv_sess_dev *sess_dev;
	u32 dev_id;
	int err;
	struct rnbd_dev_blk_io *io;
	struct bio *bio;
	short prio;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_id = le32_to_cpu(msg->device_id);

	sess_dev = rnbd_get_sess_dev(dev_id, srv_sess);
	if (IS_ERR(sess_dev)) {
		pr_err_ratelimited("Got I/O request on session %s for unknown device id %d\n",
				   srv_sess->sessname, dev_id);
		err = -ENOTCONN;
		goto err;
	}

	priv->sess_dev = sess_dev;
	priv->id = id;

	/* Generate bio with pages pointing to the rdma buffer */
	bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
	if (IS_ERR(bio)) {
		err = PTR_ERR(bio);
		rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
		goto sess_dev_put;
	}

	io = container_of(bio, struct rnbd_dev_blk_io, bio);
	io->dev = sess_dev->rnbd_dev;
	io->priv = priv;

	bio->bi_end_io = rnbd_dev_bi_end_io;
	bio->bi_private = io;
	bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
	bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
	bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
	prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
	       usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
	bio_set_prio(bio, prio);
	bio_set_dev(bio, sess_dev->rnbd_dev->bdev);

	submit_bio(bio);

	return 0;

sess_dev_put:
	rnbd_put_sess_dev(sess_dev);
err:
	kfree(priv);
	return err;
}
static void destroy_device(struct rnbd_srv_dev *dev)
{
	WARN_ONCE(!list_empty(&dev->sess_dev_list),
		  "Device %s is being destroyed but still in use!\n",
		  dev->id);

	spin_lock(&dev_lock);
	list_del(&dev->list);
	spin_unlock(&dev_lock);

	mutex_destroy(&dev->lock);
	if (dev->dev_kobj.state_in_sysfs)
		/*
		 * Destroy kobj only if it was really created.
		 */
		rnbd_srv_destroy_dev_sysfs(dev);
	else
		kfree(dev);
}
static void destroy_device_cb(struct kref *kref)
{
	struct rnbd_srv_dev *dev;

	dev = container_of(kref, struct rnbd_srv_dev, kref);

	destroy_device(dev);
}
static void rnbd_put_srv_dev(struct rnbd_srv_dev *dev)
{
	kref_put(&dev->kref, destroy_device_cb);
}
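/*
 * Tear down one session<->device mapping: unpublish the device id, wait for
 * all in-flight I/O referencing the sess_dev to finish (via the kref and the
 * on-stack completion), then close the backing device and drop the shared
 * srv_dev reference.  With keep_id the id slot stays reserved because the
 * client still holds it.
 */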
void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
{
	DECLARE_COMPLETION_ONSTACK(dc);

	if (keep_id)
		/* free the resources for the id but don't  */
		/* allow to re-use the id itself because it */
		/* is still used by the client              */
		xa_cmpxchg(&sess_dev->sess->index_idr, sess_dev->device_id,
			   sess_dev, NULL, 0);
	else
		xa_erase(&sess_dev->sess->index_idr, sess_dev->device_id);

	sess_dev->destroy_comp = &dc;
	rnbd_put_sess_dev(sess_dev);
	wait_for_completion(&dc); /* wait for inflights to drop to zero */

	rnbd_dev_close(sess_dev->rnbd_dev);
	list_del(&sess_dev->sess_list);
	mutex_lock(&sess_dev->dev->lock);
	list_del(&sess_dev->dev_list);
	if (sess_dev->open_flags & FMODE_WRITE)
		sess_dev->dev->open_write_cnt--;
	mutex_unlock(&sess_dev->dev->lock);

	rnbd_put_srv_dev(sess_dev->dev);

	rnbd_srv_info(sess_dev, "Device closed\n");
	kfree(sess_dev);
}
static void destroy_sess(struct rnbd_srv_session *srv_sess)
{
	struct rnbd_srv_sess_dev *sess_dev, *tmp;

	if (list_empty(&srv_sess->sess_dev_list))
		goto out;

	mutex_lock(&srv_sess->lock);
	list_for_each_entry_safe(sess_dev, tmp, &srv_sess->sess_dev_list,
				 sess_list)
		rnbd_srv_destroy_dev_session_sysfs(sess_dev);
	mutex_unlock(&srv_sess->lock);

out:
	xa_destroy(&srv_sess->index_idr);
	bioset_exit(&srv_sess->sess_bio_set);

	pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);

	mutex_lock(&sess_lock);
	list_del(&srv_sess->list);
	mutex_unlock(&sess_lock);

	mutex_destroy(&srv_sess->lock);
	kfree(srv_sess);
}
static int create_sess(struct rtrs_srv *rtrs)
{
	struct rnbd_srv_session *srv_sess;
	char sessname[NAME_MAX];
	int err;

	err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname));
	if (err) {
		pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err);

		return err;
	}

	srv_sess = kzalloc(sizeof(*srv_sess), GFP_KERNEL);
	if (!srv_sess)
		return -ENOMEM;

	srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
	err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth,
			  offsetof(struct rnbd_dev_blk_io, bio),
			  BIOSET_NEED_BVECS);
	if (err) {
		pr_err("Allocating srv_session for session %s failed\n",
		       sessname);
		kfree(srv_sess);
		return err;
	}

	xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
	INIT_LIST_HEAD(&srv_sess->sess_dev_list);
	mutex_init(&srv_sess->lock);
	mutex_lock(&sess_lock);
	list_add(&srv_sess->list, &sess_list);
	mutex_unlock(&sess_lock);

	srv_sess->rtrs = rtrs;
	strlcpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname));

	rtrs_srv_set_sess_priv(rtrs, srv_sess);

	return 0;
}
static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
			    enum rtrs_srv_link_ev ev, void *priv)
{
	struct rnbd_srv_session *srv_sess = priv;

	switch (ev) {
	case RTRS_SRV_LINK_EV_CONNECTED:
		return create_sess(rtrs);

	case RTRS_SRV_LINK_EV_DISCONNECTED:
		if (WARN_ON_ONCE(!srv_sess))
			return -EINVAL;

		destroy_sess(srv_sess);
		return 0;

	default:
		pr_warn("Received unknown RTRS session event %d from session %s\n",
			ev, srv_sess->sessname);
		return -EINVAL;
	}
}
void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
{
	struct rnbd_srv_session *sess = sess_dev->sess;

	sess_dev->keep_id = true;
	mutex_lock(&sess->lock);
	rnbd_srv_destroy_dev_session_sysfs(sess_dev);
	mutex_unlock(&sess->lock);
}
static int process_msg_close(struct rtrs_srv *rtrs,
			     struct rnbd_srv_session *srv_sess,
			     void *data, size_t datalen, const void *usr,
			     size_t usrlen)
{
	const struct rnbd_msg_close *close_msg = usr;
	struct rnbd_srv_sess_dev *sess_dev;

	sess_dev = rnbd_get_sess_dev(le32_to_cpu(close_msg->device_id),
				     srv_sess);
	if (IS_ERR(sess_dev))
		return 0;

	rnbd_put_sess_dev(sess_dev);
	mutex_lock(&srv_sess->lock);
	rnbd_srv_destroy_dev_session_sysfs(sess_dev);
	mutex_unlock(&srv_sess->lock);
	return 0;
}
static int process_msg_open(struct rtrs_srv *rtrs,
			    struct rnbd_srv_session *srv_sess,
			    const void *msg, size_t len,
			    void *data, size_t datalen);
static int process_msg_sess_info(struct rtrs_srv *rtrs,
				 struct rnbd_srv_session *srv_sess,
				 const void *msg, size_t len,
				 void *data, size_t datalen);
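/*
 * Central RDMA event handler: dispatch on the message type in the common
 * header.  RNBD_MSG_IO is completed asynchronously (the response is sent
 * from rnbd_endio() once the bio finishes); all other message types are
 * answered immediately via rtrs_srv_resp_rdma() below.
 */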
static int rnbd_srv_rdma_ev(struct rtrs_srv *rtrs, void *priv,
			    struct rtrs_srv_op *id, int dir,
			    void *data, size_t datalen, const void *usr,
			    size_t usrlen)
{
	struct rnbd_srv_session *srv_sess = priv;
	const struct rnbd_msg_hdr *hdr = usr;
	int ret = 0;
	u16 type;

	if (WARN_ON_ONCE(!srv_sess))
		return -ENODEV;

	type = le16_to_cpu(hdr->type);

	switch (type) {
	case RNBD_MSG_IO:
		return process_rdma(rtrs, srv_sess, id, data, datalen, usr,
				    usrlen);
	case RNBD_MSG_CLOSE:
		ret = process_msg_close(rtrs, srv_sess, data, datalen,
					usr, usrlen);
		break;
	case RNBD_MSG_OPEN:
		ret = process_msg_open(rtrs, srv_sess, usr, usrlen,
				       data, datalen);
		break;
	case RNBD_MSG_SESS_INFO:
		ret = process_msg_sess_info(rtrs, srv_sess, usr, usrlen,
					    data, datalen);
		break;
	default:
		pr_warn("Received unexpected message type %d with dir %d from session %s\n",
			type, dir, srv_sess->sessname);
		return -EINVAL;
	}

	rtrs_srv_resp_rdma(id, ret);
	return 0;
}
static struct rnbd_srv_sess_dev
*rnbd_sess_dev_alloc(struct rnbd_srv_session *srv_sess)
{
	struct rnbd_srv_sess_dev *sess_dev;
	int error;

	sess_dev = kzalloc(sizeof(*sess_dev), GFP_KERNEL);
	if (!sess_dev)
		return ERR_PTR(-ENOMEM);

	error = xa_alloc(&srv_sess->index_idr, &sess_dev->device_id, sess_dev,
			 xa_limit_32b, GFP_NOWAIT);
	if (error < 0) {
		pr_warn("Allocating idr failed, err: %d\n", error);
		kfree(sess_dev);
		return ERR_PTR(error);
	}

	return sess_dev;
}
static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
{
	struct rnbd_srv_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	strlcpy(dev->id, id, sizeof(dev->id));
	kref_init(&dev->kref);
	INIT_LIST_HEAD(&dev->sess_dev_list);
	mutex_init(&dev->lock);

	return dev;
}
static struct rnbd_srv_dev *
rnbd_srv_find_or_add_srv_dev(struct rnbd_srv_dev *new_dev)
{
	struct rnbd_srv_dev *dev;

	spin_lock(&dev_lock);
	list_for_each_entry(dev, &dev_list, list) {
		if (!strncmp(dev->id, new_dev->id, sizeof(dev->id))) {
			if (!kref_get_unless_zero(&dev->kref))
				/*
				 * We lost the race, device is almost dead.
				 * Continue traversing to find a valid one.
				 */
				continue;
			spin_unlock(&dev_lock);
			return dev;
		}
	}
	list_add(&new_dev->list, &dev_list);
	spin_unlock(&dev_lock);

	return new_dev;
}
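/*
 * Open-permission policy enforced per backing device: read-only mappings are
 * always allowed, RNBD_ACCESS_RW requires that no other writer is active,
 * and RNBD_ACCESS_MIGRATION tolerates one already-open writer (at most two
 * writers in total), presumably so a device can be handed over between
 * clients.
 */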
static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev,
					   struct rnbd_srv_session *srv_sess,
					   enum rnbd_access_mode access_mode)
{
	int ret = -EPERM;

	mutex_lock(&srv_dev->lock);

	switch (access_mode) {
	case RNBD_ACCESS_RO:
		ret = 0;
		break;
	case RNBD_ACCESS_RW:
		if (srv_dev->open_write_cnt == 0) {
			srv_dev->open_write_cnt++;
			ret = 0;
		} else {
			pr_err("Mapping device '%s' for session %s with RW permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
			       srv_dev->id, srv_sess->sessname,
			       srv_dev->open_write_cnt,
			       rnbd_access_mode_str(access_mode));
		}
		break;
	case RNBD_ACCESS_MIGRATION:
		if (srv_dev->open_write_cnt < 2) {
			srv_dev->open_write_cnt++;
			ret = 0;
		} else {
			pr_err("Mapping device '%s' for session %s with migration permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
			       srv_dev->id, srv_sess->sessname,
			       srv_dev->open_write_cnt,
			       rnbd_access_mode_str(access_mode));
		}
		break;
	default:
		pr_err("Received mapping request for device '%s' on session %s with invalid access mode: %d\n",
		       srv_dev->id, srv_sess->sessname, access_mode);
		ret = -EINVAL;
	}

	mutex_unlock(&srv_dev->lock);

	return ret;
}
static struct rnbd_srv_dev *
rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
			       struct rnbd_srv_session *srv_sess,
			       enum rnbd_access_mode access_mode)
{
	int ret;
	struct rnbd_srv_dev *new_dev, *dev;

	new_dev = rnbd_srv_init_srv_dev(rnbd_dev->name);
	if (IS_ERR(new_dev))
		return new_dev;

	dev = rnbd_srv_find_or_add_srv_dev(new_dev);
	if (dev != new_dev)
		kfree(new_dev);

	ret = rnbd_srv_check_update_open_perm(dev, srv_sess, access_mode);
	if (ret) {
		rnbd_put_srv_dev(dev);
		return ERR_PTR(ret);
	}

	return dev;
}
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
				       struct rnbd_srv_sess_dev *sess_dev)
{
	struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
	struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);

	rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
	rsp->device_id =
		cpu_to_le32(sess_dev->device_id);
	rsp->nsectors =
		cpu_to_le64(get_capacity(rnbd_dev->bdev->bd_disk));
	rsp->logical_block_size =
		cpu_to_le16(bdev_logical_block_size(rnbd_dev->bdev));
	rsp->physical_block_size =
		cpu_to_le16(bdev_physical_block_size(rnbd_dev->bdev));
	rsp->max_segments =
		cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev));
	rsp->max_hw_sectors =
		cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev));
	rsp->max_write_same_sectors =
		cpu_to_le32(bdev_write_same(rnbd_dev->bdev));
	rsp->max_discard_sectors =
		cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev));
	rsp->discard_granularity =
		cpu_to_le32(rnbd_dev_get_discard_granularity(rnbd_dev));
	rsp->discard_alignment =
		cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
	rsp->secure_discard =
		cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
	rsp->rotational = !blk_queue_nonrot(q);
	rsp->cache_policy = 0;
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		rsp->cache_policy |= RNBD_WRITEBACK;
	if (blk_queue_fua(q))
		rsp->cache_policy |= RNBD_FUA;
}
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
			     const struct rnbd_msg_open *open_msg,
			     struct rnbd_dev *rnbd_dev, fmode_t open_flags,
			     struct rnbd_srv_dev *srv_dev)
{
	struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);

	if (IS_ERR(sdev))
		return sdev;

	kref_init(&sdev->kref);

	strlcpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));

	sdev->rnbd_dev		= rnbd_dev;
	sdev->sess		= srv_sess;
	sdev->dev		= srv_dev;
	sdev->open_flags	= open_flags;
	sdev->access_mode	= open_msg->access_mode;

	return sdev;
}
static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
				    const char *dev_name)
{
	char *full_path;
	char *a, *b;

	full_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!full_path)
		return ERR_PTR(-ENOMEM);

	/*
	 * Replace %SESSNAME% with a real session name in order to
	 * create device namespace.
	 */
	a = strnstr(dev_search_path, "%SESSNAME%", sizeof(dev_search_path));
	if (a) {
		int len = a - dev_search_path;

		len = snprintf(full_path, PATH_MAX, "%.*s/%s/%s", len,
			       dev_search_path, srv_sess->sessname, dev_name);
		if (len >= PATH_MAX) {
			pr_err("Too long path: %s, %s, %s\n",
			       dev_search_path, srv_sess->sessname, dev_name);
			kfree(full_path);
			return ERR_PTR(-EINVAL);
		}
	} else {
		snprintf(full_path, PATH_MAX, "%s/%s",
			 dev_search_path, dev_name);
	}

	/* eliminate duplicated slashes */
	a = strchr(full_path, '/');
	b = a;
	while (*b != '\0') {
		if (*b == '/' && *a == '/') {
			b++;
		} else {
			a++;
			*a = *b;
			b++;
		}
	}
	a++;
	*a = '\0';

	return full_path;
}
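/*
 * Protocol version negotiation: the session operates at the minimum of the
 * client's advertised major version and RNBD_PROTO_VER_MAJOR, and the agreed
 * version is echoed back in the RNBD_MSG_SESS_INFO_RSP reply.
 */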
static int process_msg_sess_info(struct rtrs_srv *rtrs,
				 struct rnbd_srv_session *srv_sess,
				 const void *msg, size_t len,
				 void *data, size_t datalen)
{
	const struct rnbd_msg_sess_info *sess_info_msg = msg;
	struct rnbd_msg_sess_info_rsp *rsp = data;

	srv_sess->ver = min_t(u8, sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
	pr_debug("Session %s using protocol version %d (client version: %d, server version: %d)\n",
		 srv_sess->sessname, srv_sess->ver,
		 sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);

	rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP);
	rsp->ver = srv_sess->ver;

	return 0;
}
/**
 * find_srv_sess_dev() - check whether a device with this name is already opened by the session
 * @srv_sess:	the session to search.
 * @dev_name:	string containing the name of the device.
 *
 * Return: the struct rnbd_srv_sess_dev if srv_sess has already opened
 * dev_name, NULL if the session has not opened the device yet.
 */
static struct rnbd_srv_sess_dev *
find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name)
{
	struct rnbd_srv_sess_dev *sess_dev;

	if (list_empty(&srv_sess->sess_dev_list))
		return NULL;

	list_for_each_entry(sess_dev, &srv_sess->sess_dev_list, sess_list)
		if (!strcmp(sess_dev->pathname, dev_name))
			return sess_dev;

	return NULL;
}
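/*
 * RNBD_MSG_OPEN handling, in order: reuse an existing mapping if the session
 * already opened this path, validate the path (length, no ".."), resolve the
 * full path under dev_search_path, open the backing block device, look up or
 * create the shared srv_dev (checking access-mode permissions), create the
 * per-session sess_dev plus its sysfs entries, and finally fill the open
 * response with the device geometry.
 */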
static int process_msg_open(struct rtrs_srv *rtrs,
			    struct rnbd_srv_session *srv_sess,
			    const void *msg, size_t len,
			    void *data, size_t datalen)
{
	int ret;
	struct rnbd_srv_dev *srv_dev;
	struct rnbd_srv_sess_dev *srv_sess_dev;
	const struct rnbd_msg_open *open_msg = msg;
	fmode_t open_flags;
	char *full_path;
	struct rnbd_dev *rnbd_dev;
	struct rnbd_msg_open_rsp *rsp = data;

	pr_debug("Open message received: session='%s' path='%s' access_mode=%d\n",
		 srv_sess->sessname, open_msg->dev_name,
		 open_msg->access_mode);
	open_flags = FMODE_READ;
	if (open_msg->access_mode != RNBD_ACCESS_RO)
		open_flags |= FMODE_WRITE;

	mutex_lock(&srv_sess->lock);

	srv_sess_dev = find_srv_sess_dev(srv_sess, open_msg->dev_name);
	if (srv_sess_dev)
		goto fill_response;

	if ((strlen(dev_search_path) + strlen(open_msg->dev_name))
	    >= PATH_MAX) {
		pr_err("Opening device for session %s failed, device path too long. '%s/%s' is longer than PATH_MAX (%d)\n",
		       srv_sess->sessname, dev_search_path, open_msg->dev_name,
		       PATH_MAX);
		ret = -EINVAL;
		goto reject;
	}
	if (strstr(open_msg->dev_name, "..")) {
		pr_err("Opening device for session %s failed, device path %s contains relative path ..\n",
		       srv_sess->sessname, open_msg->dev_name);
		ret = -EINVAL;
		goto reject;
	}
	full_path = rnbd_srv_get_full_path(srv_sess, open_msg->dev_name);
	if (IS_ERR(full_path)) {
		ret = PTR_ERR(full_path);
		pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %d\n",
		       open_msg->dev_name, srv_sess->sessname, ret);
		goto reject;
	}

	rnbd_dev = rnbd_dev_open(full_path, open_flags,
				 &srv_sess->sess_bio_set);
	if (IS_ERR(rnbd_dev)) {
		pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
		       full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
		ret = PTR_ERR(rnbd_dev);
		goto free_path;
	}

	srv_dev = rnbd_srv_get_or_create_srv_dev(rnbd_dev, srv_sess,
						 open_msg->access_mode);
	if (IS_ERR(srv_dev)) {
		pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
		       full_path, srv_sess->sessname, PTR_ERR(srv_dev));
		ret = PTR_ERR(srv_dev);
		goto rnbd_dev_close;
	}

	srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
						    rnbd_dev, open_flags,
						    srv_dev);
	if (IS_ERR(srv_sess_dev)) {
		pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
		       full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
		ret = PTR_ERR(srv_sess_dev);
		goto srv_dev_put;
	}

	/* Create the srv_dev sysfs files if they haven't been created yet. The
	 * reason to delay the creation is not to create the sysfs files before
	 * we are sure the device can be opened.
	 */
	mutex_lock(&srv_dev->lock);
	if (!srv_dev->dev_kobj.state_in_sysfs) {
		ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev,
						rnbd_dev->name);
		if (ret) {
			mutex_unlock(&srv_dev->lock);
			rnbd_srv_err(srv_sess_dev,
				     "Opening device failed, failed to create device sysfs files, err: %d\n",
				     ret);
			goto free_srv_sess_dev;
		}
	}

	ret = rnbd_srv_create_dev_session_sysfs(srv_sess_dev);
	if (ret) {
		mutex_unlock(&srv_dev->lock);
		rnbd_srv_err(srv_sess_dev,
			     "Opening device failed, failed to create dev client sysfs files, err: %d\n",
			     ret);
		goto free_srv_sess_dev;
	}

	list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
	mutex_unlock(&srv_dev->lock);

	list_add(&srv_sess_dev->sess_list, &srv_sess->sess_dev_list);

	rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id);

	kfree(full_path);

fill_response:
	rnbd_srv_fill_msg_open_rsp(rsp, srv_sess_dev);
	mutex_unlock(&srv_sess->lock);
	return 0;

free_srv_sess_dev:
	xa_erase(&srv_sess->index_idr, srv_sess_dev->device_id);
	kfree(srv_sess_dev);
srv_dev_put:
	if (open_msg->access_mode != RNBD_ACCESS_RO) {
		mutex_lock(&srv_dev->lock);
		srv_dev->open_write_cnt--;
		mutex_unlock(&srv_dev->lock);
	}
	rnbd_put_srv_dev(srv_dev);
rnbd_dev_close:
	rnbd_dev_close(rnbd_dev);
free_path:
	kfree(full_path);
reject:
	mutex_unlock(&srv_sess->lock);
	return ret;
}
static struct rtrs_srv_ctx *rtrs_ctx;

static struct rtrs_srv_ops rtrs_ops;
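/*
 * Module init: the BUILD_BUG_ON() checks pin the on-wire message sizes so
 * that client and server agree on the protocol layout, then the RTRS server
 * context is opened on port_nr with the rdma/link event handlers above.
 */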
static int __init rnbd_srv_init_module(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
	rtrs_ops = (struct rtrs_srv_ops) {
		.rdma_ev = rnbd_srv_rdma_ev,
		.link_ev = rnbd_srv_link_ev,
	};
	rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
	if (IS_ERR(rtrs_ctx)) {
		err = PTR_ERR(rtrs_ctx);
		pr_err("rtrs_srv_open(), err: %d\n", err);
		return err;
	}

	err = rnbd_srv_create_sysfs_files();
	if (err) {
		pr_err("rnbd_srv_create_sysfs_files(), err: %d\n", err);
		rtrs_srv_close(rtrs_ctx);
		return err;
	}

	return 0;
}
static void __exit rnbd_srv_cleanup_module(void)
{
	rtrs_srv_close(rtrs_ctx);
	WARN_ON(!list_empty(&sess_list));
	rnbd_srv_destroy_sysfs_files();
}
module_init(rnbd_srv_init_module);
module_exit(rnbd_srv_cleanup_module);