// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
10 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12 #include <linux/module.h>
13 #include <linux/blkdev.h>
16 #include "rnbd-srv-trace.h"
18 MODULE_DESCRIPTION("RDMA Network Block Device Server");
19 MODULE_LICENSE("GPL");
21 static u16 port_nr
= RTRS_PORT
;
23 module_param_named(port_nr
, port_nr
, ushort
, 0444);
24 MODULE_PARM_DESC(port_nr
,
25 "The port number the server is listening on (default: "
26 __stringify(RTRS_PORT
)")");
28 #define DEFAULT_DEV_SEARCH_PATH "/"
30 static char dev_search_path
[PATH_MAX
] = DEFAULT_DEV_SEARCH_PATH
;
32 static int dev_search_path_set(const char *val
, const struct kernel_param
*kp
)
34 const char *p
= strrchr(val
, '\n') ? : val
+ strlen(val
);
36 if (strlen(val
) >= sizeof(dev_search_path
))
39 snprintf(dev_search_path
, sizeof(dev_search_path
), "%.*s",
42 pr_info("dev_search_path changed to '%s'\n", dev_search_path
);
47 static struct kparam_string dev_search_path_kparam_str
= {
48 .maxlen
= sizeof(dev_search_path
),
49 .string
= dev_search_path
52 static const struct kernel_param_ops dev_search_path_ops
= {
53 .set
= dev_search_path_set
,
54 .get
= param_get_string
,
57 module_param_cb(dev_search_path
, &dev_search_path_ops
,
58 &dev_search_path_kparam_str
, 0444);
59 MODULE_PARM_DESC(dev_search_path
,
60 "Sets the dev_search_path. When a device is mapped this path is prepended to the device path from the map device operation. If %SESSNAME% is specified in a path, then device will be searched in a session namespace. (default: "
61 DEFAULT_DEV_SEARCH_PATH
")");
/* Protects sess_list; sleeping context only. */
static DEFINE_MUTEX(sess_lock);
/* Protects dev_list; taken in non-sleeping context. */
static DEFINE_SPINLOCK(dev_lock);

static LIST_HEAD(sess_list);
static LIST_HEAD(dev_list);
/* Per-I/O context carried via bio->bi_private until completion. */
struct rnbd_io_private {
	struct rtrs_srv_op	*id;		/* RTRS op to answer on completion */
	struct rnbd_srv_sess_dev *sess_dev;	/* device ref held for the I/O */
};
74 static void rnbd_sess_dev_release(struct kref
*kref
)
76 struct rnbd_srv_sess_dev
*sess_dev
;
78 sess_dev
= container_of(kref
, struct rnbd_srv_sess_dev
, kref
);
79 complete(sess_dev
->destroy_comp
);
82 static inline void rnbd_put_sess_dev(struct rnbd_srv_sess_dev
*sess_dev
)
84 kref_put(&sess_dev
->kref
, rnbd_sess_dev_release
);
87 static struct rnbd_srv_sess_dev
*
88 rnbd_get_sess_dev(int dev_id
, struct rnbd_srv_session
*srv_sess
)
90 struct rnbd_srv_sess_dev
*sess_dev
;
94 sess_dev
= xa_load(&srv_sess
->index_idr
, dev_id
);
96 ret
= kref_get_unless_zero(&sess_dev
->kref
);
100 return ERR_PTR(-ENXIO
);
105 static void rnbd_dev_bi_end_io(struct bio
*bio
)
107 struct rnbd_io_private
*rnbd_priv
= bio
->bi_private
;
108 struct rnbd_srv_sess_dev
*sess_dev
= rnbd_priv
->sess_dev
;
110 rnbd_put_sess_dev(sess_dev
);
111 rtrs_srv_resp_rdma(rnbd_priv
->id
, blk_status_to_errno(bio
->bi_status
));
117 static int process_rdma(struct rnbd_srv_session
*srv_sess
,
118 struct rtrs_srv_op
*id
, void *data
, u32 datalen
,
119 const void *usr
, size_t usrlen
)
121 const struct rnbd_msg_io
*msg
= usr
;
122 struct rnbd_io_private
*priv
;
123 struct rnbd_srv_sess_dev
*sess_dev
;
129 trace_process_rdma(srv_sess
, msg
, id
, datalen
, usrlen
);
131 priv
= kmalloc(sizeof(*priv
), GFP_KERNEL
);
135 dev_id
= le32_to_cpu(msg
->device_id
);
137 sess_dev
= rnbd_get_sess_dev(dev_id
, srv_sess
);
138 if (IS_ERR(sess_dev
)) {
139 pr_err_ratelimited("Got I/O request on session %s for unknown device id %d: %pe\n",
140 srv_sess
->sessname
, dev_id
, sess_dev
);
145 priv
->sess_dev
= sess_dev
;
148 bio
= bio_alloc(file_bdev(sess_dev
->bdev_file
), 1,
149 rnbd_to_bio_flags(le32_to_cpu(msg
->rw
)), GFP_KERNEL
);
150 if (bio_add_page(bio
, virt_to_page(data
), datalen
,
151 offset_in_page(data
)) != datalen
) {
152 rnbd_srv_err_rl(sess_dev
, "Failed to map data to bio\n");
157 bio
->bi_opf
= rnbd_to_bio_flags(le32_to_cpu(msg
->rw
));
158 if (bio_has_data(bio
) &&
159 bio
->bi_iter
.bi_size
!= le32_to_cpu(msg
->bi_size
)) {
160 rnbd_srv_err_rl(sess_dev
, "Datalen mismatch: bio bi_size (%u), bi_size (%u)\n",
161 bio
->bi_iter
.bi_size
, msg
->bi_size
);
165 bio
->bi_end_io
= rnbd_dev_bi_end_io
;
166 bio
->bi_private
= priv
;
167 bio
->bi_iter
.bi_sector
= le64_to_cpu(msg
->sector
);
168 prio
= srv_sess
->ver
< RNBD_PROTO_VER_MAJOR
||
169 usrlen
< sizeof(*msg
) ? 0 : le16_to_cpu(msg
->prio
);
170 bio_set_prio(bio
, prio
);
178 rnbd_put_sess_dev(sess_dev
);
184 static void destroy_device(struct kref
*kref
)
186 struct rnbd_srv_dev
*dev
= container_of(kref
, struct rnbd_srv_dev
, kref
);
188 WARN_ONCE(!list_empty(&dev
->sess_dev_list
),
189 "Device %s is being destroyed but still in use!\n",
192 spin_lock(&dev_lock
);
193 list_del(&dev
->list
);
194 spin_unlock(&dev_lock
);
196 mutex_destroy(&dev
->lock
);
197 if (dev
->dev_kobj
.state_in_sysfs
)
199 * Destroy kobj only if it was really created.
201 rnbd_srv_destroy_dev_sysfs(dev
);
206 static void rnbd_put_srv_dev(struct rnbd_srv_dev
*dev
)
208 kref_put(&dev
->kref
, destroy_device
);
211 void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev
*sess_dev
, bool keep_id
)
213 DECLARE_COMPLETION_ONSTACK(dc
);
216 /* free the resources for the id but don't */
217 /* allow to re-use the id itself because it */
218 /* is still used by the client */
219 xa_cmpxchg(&sess_dev
->sess
->index_idr
, sess_dev
->device_id
,
222 xa_erase(&sess_dev
->sess
->index_idr
, sess_dev
->device_id
);
225 sess_dev
->destroy_comp
= &dc
;
226 rnbd_put_sess_dev(sess_dev
);
227 wait_for_completion(&dc
); /* wait for inflights to drop to zero */
229 fput(sess_dev
->bdev_file
);
230 mutex_lock(&sess_dev
->dev
->lock
);
231 list_del(&sess_dev
->dev_list
);
232 if (!sess_dev
->readonly
)
233 sess_dev
->dev
->open_write_cnt
--;
234 mutex_unlock(&sess_dev
->dev
->lock
);
236 rnbd_put_srv_dev(sess_dev
->dev
);
238 rnbd_srv_info(sess_dev
, "Device closed\n");
242 static void destroy_sess(struct rnbd_srv_session
*srv_sess
)
244 struct rnbd_srv_sess_dev
*sess_dev
;
247 if (xa_empty(&srv_sess
->index_idr
))
250 trace_destroy_sess(srv_sess
);
252 mutex_lock(&srv_sess
->lock
);
253 xa_for_each(&srv_sess
->index_idr
, index
, sess_dev
)
254 rnbd_srv_destroy_dev_session_sysfs(sess_dev
);
255 mutex_unlock(&srv_sess
->lock
);
258 xa_destroy(&srv_sess
->index_idr
);
260 pr_info("RTRS Session %s disconnected\n", srv_sess
->sessname
);
262 mutex_lock(&sess_lock
);
263 list_del(&srv_sess
->list
);
264 mutex_unlock(&sess_lock
);
266 mutex_destroy(&srv_sess
->lock
);
270 static int create_sess(struct rtrs_srv_sess
*rtrs
)
272 struct rnbd_srv_session
*srv_sess
;
273 char pathname
[NAME_MAX
];
276 err
= rtrs_srv_get_path_name(rtrs
, pathname
, sizeof(pathname
));
278 pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname
, err
);
282 srv_sess
= kzalloc(sizeof(*srv_sess
), GFP_KERNEL
);
286 srv_sess
->queue_depth
= rtrs_srv_get_queue_depth(rtrs
);
287 xa_init_flags(&srv_sess
->index_idr
, XA_FLAGS_ALLOC
);
288 mutex_init(&srv_sess
->lock
);
289 mutex_lock(&sess_lock
);
290 list_add(&srv_sess
->list
, &sess_list
);
291 mutex_unlock(&sess_lock
);
293 srv_sess
->rtrs
= rtrs
;
294 strscpy(srv_sess
->sessname
, pathname
, sizeof(srv_sess
->sessname
));
296 rtrs_srv_set_sess_priv(rtrs
, srv_sess
);
298 trace_create_sess(srv_sess
);
303 static int rnbd_srv_link_ev(struct rtrs_srv_sess
*rtrs
,
304 enum rtrs_srv_link_ev ev
, void *priv
)
306 struct rnbd_srv_session
*srv_sess
= priv
;
309 case RTRS_SRV_LINK_EV_CONNECTED
:
310 return create_sess(rtrs
);
312 case RTRS_SRV_LINK_EV_DISCONNECTED
:
313 if (WARN_ON_ONCE(!srv_sess
))
316 destroy_sess(srv_sess
);
320 pr_warn("Received unknown RTRS session event %d from session %s\n",
321 ev
, srv_sess
->sessname
);
326 void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev
*sess_dev
,
327 struct kobj_attribute
*attr
)
329 struct rnbd_srv_session
*sess
= sess_dev
->sess
;
331 /* It is already started to close by client's close message. */
332 if (!mutex_trylock(&sess
->lock
))
335 sess_dev
->keep_id
= true;
336 /* first remove sysfs itself to avoid deadlock */
337 sysfs_remove_file_self(&sess_dev
->kobj
, &attr
->attr
);
338 rnbd_srv_destroy_dev_session_sysfs(sess_dev
);
339 mutex_unlock(&sess
->lock
);
342 static void process_msg_close(struct rnbd_srv_session
*srv_sess
,
343 void *data
, size_t datalen
, const void *usr
,
346 const struct rnbd_msg_close
*close_msg
= usr
;
347 struct rnbd_srv_sess_dev
*sess_dev
;
349 trace_process_msg_close(srv_sess
, close_msg
);
351 sess_dev
= rnbd_get_sess_dev(le32_to_cpu(close_msg
->device_id
),
353 if (IS_ERR(sess_dev
))
356 rnbd_put_sess_dev(sess_dev
);
357 mutex_lock(&srv_sess
->lock
);
358 rnbd_srv_destroy_dev_session_sysfs(sess_dev
);
359 mutex_unlock(&srv_sess
->lock
);
362 static int process_msg_open(struct rnbd_srv_session
*srv_sess
,
363 const void *msg
, size_t len
,
364 void *data
, size_t datalen
);
366 static void process_msg_sess_info(struct rnbd_srv_session
*srv_sess
,
367 const void *msg
, size_t len
,
368 void *data
, size_t datalen
);
370 static int rnbd_srv_rdma_ev(void *priv
, struct rtrs_srv_op
*id
,
371 void *data
, size_t datalen
,
372 const void *usr
, size_t usrlen
)
374 struct rnbd_srv_session
*srv_sess
= priv
;
375 const struct rnbd_msg_hdr
*hdr
= usr
;
379 if (WARN_ON_ONCE(!srv_sess
))
382 type
= le16_to_cpu(hdr
->type
);
386 return process_rdma(srv_sess
, id
, data
, datalen
, usr
, usrlen
);
388 process_msg_close(srv_sess
, data
, datalen
, usr
, usrlen
);
391 ret
= process_msg_open(srv_sess
, usr
, usrlen
, data
, datalen
);
393 case RNBD_MSG_SESS_INFO
:
394 process_msg_sess_info(srv_sess
, usr
, usrlen
, data
, datalen
);
397 pr_warn("Received unexpected message type %d from session %s\n",
398 type
, srv_sess
->sessname
);
403 * Since ret is passed to rtrs to handle the failure case, we
404 * just return 0 at the end otherwise callers in rtrs would call
405 * send_io_resp_imm again to print redundant err message.
407 rtrs_srv_resp_rdma(id
, ret
);
411 static struct rnbd_srv_sess_dev
412 *rnbd_sess_dev_alloc(struct rnbd_srv_session
*srv_sess
)
414 struct rnbd_srv_sess_dev
*sess_dev
;
417 sess_dev
= kzalloc(sizeof(*sess_dev
), GFP_KERNEL
);
419 return ERR_PTR(-ENOMEM
);
421 error
= xa_alloc(&srv_sess
->index_idr
, &sess_dev
->device_id
, sess_dev
,
422 xa_limit_32b
, GFP_NOWAIT
);
424 pr_warn("Allocating idr failed, err: %d\n", error
);
426 return ERR_PTR(error
);
432 static struct rnbd_srv_dev
*rnbd_srv_init_srv_dev(struct block_device
*bdev
)
434 struct rnbd_srv_dev
*dev
;
436 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
438 return ERR_PTR(-ENOMEM
);
440 snprintf(dev
->name
, sizeof(dev
->name
), "%pg", bdev
);
441 kref_init(&dev
->kref
);
442 INIT_LIST_HEAD(&dev
->sess_dev_list
);
443 mutex_init(&dev
->lock
);
448 static struct rnbd_srv_dev
*
449 rnbd_srv_find_or_add_srv_dev(struct rnbd_srv_dev
*new_dev
)
451 struct rnbd_srv_dev
*dev
;
453 spin_lock(&dev_lock
);
454 list_for_each_entry(dev
, &dev_list
, list
) {
455 if (!strncmp(dev
->name
, new_dev
->name
, sizeof(dev
->name
))) {
456 if (!kref_get_unless_zero(&dev
->kref
))
458 * We lost the race, device is almost dead.
459 * Continue traversing to find a valid one.
462 spin_unlock(&dev_lock
);
466 list_add(&new_dev
->list
, &dev_list
);
467 spin_unlock(&dev_lock
);
472 static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev
*srv_dev
,
473 struct rnbd_srv_session
*srv_sess
,
474 enum rnbd_access_mode access_mode
)
478 mutex_lock(&srv_dev
->lock
);
480 switch (access_mode
) {
484 if (srv_dev
->open_write_cnt
== 0) {
485 srv_dev
->open_write_cnt
++;
487 pr_err("Mapping device '%s' for session %s with RW permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
488 srv_dev
->name
, srv_sess
->sessname
,
489 srv_dev
->open_write_cnt
,
490 rnbd_access_modes
[access_mode
].str
);
494 case RNBD_ACCESS_MIGRATION
:
495 if (srv_dev
->open_write_cnt
< 2) {
496 srv_dev
->open_write_cnt
++;
498 pr_err("Mapping device '%s' for session %s with migration permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
499 srv_dev
->name
, srv_sess
->sessname
,
500 srv_dev
->open_write_cnt
,
501 rnbd_access_modes
[access_mode
].str
);
506 pr_err("Received mapping request for device '%s' on session %s with invalid access mode: %d\n",
507 srv_dev
->name
, srv_sess
->sessname
, access_mode
);
511 mutex_unlock(&srv_dev
->lock
);
516 static struct rnbd_srv_dev
*
517 rnbd_srv_get_or_create_srv_dev(struct block_device
*bdev
,
518 struct rnbd_srv_session
*srv_sess
,
519 enum rnbd_access_mode access_mode
)
522 struct rnbd_srv_dev
*new_dev
, *dev
;
524 new_dev
= rnbd_srv_init_srv_dev(bdev
);
528 dev
= rnbd_srv_find_or_add_srv_dev(new_dev
);
532 ret
= rnbd_srv_check_update_open_perm(dev
, srv_sess
, access_mode
);
534 rnbd_put_srv_dev(dev
);
541 static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp
*rsp
,
542 struct rnbd_srv_sess_dev
*sess_dev
)
544 struct block_device
*bdev
= file_bdev(sess_dev
->bdev_file
);
546 rsp
->hdr
.type
= cpu_to_le16(RNBD_MSG_OPEN_RSP
);
547 rsp
->device_id
= cpu_to_le32(sess_dev
->device_id
);
548 rsp
->nsectors
= cpu_to_le64(bdev_nr_sectors(bdev
));
549 rsp
->logical_block_size
= cpu_to_le16(bdev_logical_block_size(bdev
));
550 rsp
->physical_block_size
= cpu_to_le16(bdev_physical_block_size(bdev
));
551 rsp
->max_segments
= cpu_to_le16(bdev_max_segments(bdev
));
552 rsp
->max_hw_sectors
=
553 cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev
)));
554 rsp
->max_write_zeroes_sectors
=
555 cpu_to_le32(bdev_write_zeroes_sectors(bdev
));
556 rsp
->max_discard_sectors
= cpu_to_le32(bdev_max_discard_sectors(bdev
));
557 rsp
->discard_granularity
= cpu_to_le32(bdev_discard_granularity(bdev
));
558 rsp
->discard_alignment
= cpu_to_le32(bdev_discard_alignment(bdev
));
559 rsp
->secure_discard
= cpu_to_le16(bdev_max_secure_erase_sectors(bdev
));
560 rsp
->cache_policy
= 0;
561 if (bdev_write_cache(bdev
))
562 rsp
->cache_policy
|= RNBD_WRITEBACK
;
564 rsp
->cache_policy
|= RNBD_FUA
;
567 static struct rnbd_srv_sess_dev
*
568 rnbd_srv_create_set_sess_dev(struct rnbd_srv_session
*srv_sess
,
569 const struct rnbd_msg_open
*open_msg
,
570 struct file
*bdev_file
, bool readonly
,
571 struct rnbd_srv_dev
*srv_dev
)
573 struct rnbd_srv_sess_dev
*sdev
= rnbd_sess_dev_alloc(srv_sess
);
578 kref_init(&sdev
->kref
);
580 strscpy(sdev
->pathname
, open_msg
->dev_name
, sizeof(sdev
->pathname
));
582 sdev
->bdev_file
= bdev_file
;
583 sdev
->sess
= srv_sess
;
585 sdev
->readonly
= readonly
;
586 sdev
->access_mode
= open_msg
->access_mode
;
591 static char *rnbd_srv_get_full_path(struct rnbd_srv_session
*srv_sess
,
592 const char *dev_name
)
598 full_path
= kmalloc(PATH_MAX
, GFP_KERNEL
);
600 return ERR_PTR(-ENOMEM
);
603 * Replace %SESSNAME% with a real session name in order to
604 * create device namespace.
606 a
= strnstr(dev_search_path
, "%SESSNAME%", sizeof(dev_search_path
));
608 len
= a
- dev_search_path
;
610 len
= snprintf(full_path
, PATH_MAX
, "%.*s/%s/%s", len
,
611 dev_search_path
, srv_sess
->sessname
, dev_name
);
613 len
= snprintf(full_path
, PATH_MAX
, "%s/%s",
614 dev_search_path
, dev_name
);
616 if (len
>= PATH_MAX
) {
617 pr_err("Too long path: %s, %s, %s\n",
618 dev_search_path
, srv_sess
->sessname
, dev_name
);
620 return ERR_PTR(-EINVAL
);
623 /* eliminitate duplicated slashes */
624 a
= strchr(full_path
, '/');
627 if (*b
== '/' && *a
== '/') {
641 static void process_msg_sess_info(struct rnbd_srv_session
*srv_sess
,
642 const void *msg
, size_t len
,
643 void *data
, size_t datalen
)
645 const struct rnbd_msg_sess_info
*sess_info_msg
= msg
;
646 struct rnbd_msg_sess_info_rsp
*rsp
= data
;
648 srv_sess
->ver
= min_t(u8
, sess_info_msg
->ver
, RNBD_PROTO_VER_MAJOR
);
650 trace_process_msg_sess_info(srv_sess
, sess_info_msg
);
652 rsp
->hdr
.type
= cpu_to_le16(RNBD_MSG_SESS_INFO_RSP
);
653 rsp
->ver
= srv_sess
->ver
;
657 * find_srv_sess_dev() - a dev is already opened by this name
658 * @srv_sess: the session to search.
659 * @dev_name: string containing the name of the device.
661 * Return struct rnbd_srv_sess_dev if srv_sess already opened the dev_name
662 * NULL if the session didn't open the device yet.
664 static struct rnbd_srv_sess_dev
*
665 find_srv_sess_dev(struct rnbd_srv_session
*srv_sess
, const char *dev_name
)
667 struct rnbd_srv_sess_dev
*sess_dev
;
670 if (xa_empty(&srv_sess
->index_idr
))
673 xa_for_each(&srv_sess
->index_idr
, index
, sess_dev
)
674 if (!strcmp(sess_dev
->pathname
, dev_name
))
680 static int process_msg_open(struct rnbd_srv_session
*srv_sess
,
681 const void *msg
, size_t len
,
682 void *data
, size_t datalen
)
685 struct rnbd_srv_dev
*srv_dev
;
686 struct rnbd_srv_sess_dev
*srv_sess_dev
;
687 const struct rnbd_msg_open
*open_msg
= msg
;
688 struct file
*bdev_file
;
689 blk_mode_t open_flags
= BLK_OPEN_READ
;
691 struct rnbd_msg_open_rsp
*rsp
= data
;
693 trace_process_msg_open(srv_sess
, open_msg
);
695 if (open_msg
->access_mode
!= RNBD_ACCESS_RO
)
696 open_flags
|= BLK_OPEN_WRITE
;
698 mutex_lock(&srv_sess
->lock
);
700 srv_sess_dev
= find_srv_sess_dev(srv_sess
, open_msg
->dev_name
);
704 if ((strlen(dev_search_path
) + strlen(open_msg
->dev_name
))
706 pr_err("Opening device for session %s failed, device path too long. '%s/%s' is longer than PATH_MAX (%d)\n",
707 srv_sess
->sessname
, dev_search_path
, open_msg
->dev_name
,
712 if (strstr(open_msg
->dev_name
, "..")) {
713 pr_err("Opening device for session %s failed, device path %s contains relative path ..\n",
714 srv_sess
->sessname
, open_msg
->dev_name
);
718 full_path
= rnbd_srv_get_full_path(srv_sess
, open_msg
->dev_name
);
719 if (IS_ERR(full_path
)) {
720 ret
= PTR_ERR(full_path
);
721 pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %pe\n",
722 open_msg
->dev_name
, srv_sess
->sessname
, full_path
);
726 bdev_file
= bdev_file_open_by_path(full_path
, open_flags
, NULL
, NULL
);
727 if (IS_ERR(bdev_file
)) {
728 ret
= PTR_ERR(bdev_file
);
729 pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %pe\n",
730 full_path
, srv_sess
->sessname
, bdev_file
);
734 srv_dev
= rnbd_srv_get_or_create_srv_dev(file_bdev(bdev_file
), srv_sess
,
735 open_msg
->access_mode
);
736 if (IS_ERR(srv_dev
)) {
737 pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %pe\n",
738 full_path
, srv_sess
->sessname
, srv_dev
);
739 ret
= PTR_ERR(srv_dev
);
743 srv_sess_dev
= rnbd_srv_create_set_sess_dev(srv_sess
, open_msg
,
745 open_msg
->access_mode
== RNBD_ACCESS_RO
,
747 if (IS_ERR(srv_sess_dev
)) {
748 pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %pe\n",
749 full_path
, srv_sess
->sessname
, srv_sess_dev
);
750 ret
= PTR_ERR(srv_sess_dev
);
754 /* Create the srv_dev sysfs files if they haven't been created yet. The
755 * reason to delay the creation is not to create the sysfs files before
756 * we are sure the device can be opened.
758 mutex_lock(&srv_dev
->lock
);
759 if (!srv_dev
->dev_kobj
.state_in_sysfs
) {
760 ret
= rnbd_srv_create_dev_sysfs(srv_dev
, file_bdev(bdev_file
));
762 mutex_unlock(&srv_dev
->lock
);
763 rnbd_srv_err(srv_sess_dev
,
764 "Opening device failed, failed to create device sysfs files, err: %d\n",
766 goto free_srv_sess_dev
;
770 ret
= rnbd_srv_create_dev_session_sysfs(srv_sess_dev
);
772 mutex_unlock(&srv_dev
->lock
);
773 rnbd_srv_err(srv_sess_dev
,
774 "Opening device failed, failed to create dev client sysfs files, err: %d\n",
776 goto free_srv_sess_dev
;
779 list_add(&srv_sess_dev
->dev_list
, &srv_dev
->sess_dev_list
);
780 mutex_unlock(&srv_dev
->lock
);
782 rnbd_srv_info(srv_sess_dev
, "Opened device '%s'\n", srv_dev
->name
);
787 rnbd_srv_fill_msg_open_rsp(rsp
, srv_sess_dev
);
788 mutex_unlock(&srv_sess
->lock
);
792 xa_erase(&srv_sess
->index_idr
, srv_sess_dev
->device_id
);
796 if (open_msg
->access_mode
!= RNBD_ACCESS_RO
) {
797 mutex_lock(&srv_dev
->lock
);
798 srv_dev
->open_write_cnt
--;
799 mutex_unlock(&srv_dev
->lock
);
801 rnbd_put_srv_dev(srv_dev
);
807 mutex_unlock(&srv_sess
->lock
);
811 static struct rtrs_srv_ctx
*rtrs_ctx
;
813 static struct rtrs_srv_ops rtrs_ops
;
814 static int __init
rnbd_srv_init_module(void)
818 BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr
) != 4);
819 BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info
) != 36);
820 BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp
) != 36);
821 BUILD_BUG_ON(sizeof(struct rnbd_msg_open
) != 264);
822 BUILD_BUG_ON(sizeof(struct rnbd_msg_close
) != 8);
823 BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp
) != 56);
824 rtrs_ops
= (struct rtrs_srv_ops
) {
825 .rdma_ev
= rnbd_srv_rdma_ev
,
826 .link_ev
= rnbd_srv_link_ev
,
828 rtrs_ctx
= rtrs_srv_open(&rtrs_ops
, port_nr
);
829 if (IS_ERR(rtrs_ctx
)) {
830 pr_err("rtrs_srv_open(), err: %pe\n", rtrs_ctx
);
831 return PTR_ERR(rtrs_ctx
);
834 err
= rnbd_srv_create_sysfs_files();
836 pr_err("rnbd_srv_create_sysfs_files(), err: %d\n", err
);
837 rtrs_srv_close(rtrs_ctx
);
843 static void __exit
rnbd_srv_cleanup_module(void)
845 rtrs_srv_close(rtrs_ctx
);
846 WARN_ON(!list_empty(&sess_list
));
847 rnbd_srv_destroy_sysfs_files();
module_init(rnbd_srv_init_module);
module_exit(rnbd_srv_cleanup_module);