/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
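/*
 * A minimal usage sketch (shell commands, not part of this file).  The
 * monitor address, credentials, pool and image names below are
 * hypothetical placeholders; see the sysfs ABI document above for the
 * authoritative format of the add string:
 *
 *	# echo "1.2.3.4:6789 name=admin,secret=<key> rbdpool myimage" \
 *		> /sys/bus/rbd/add
 *	# lsblk /dev/rbd0			# the mapped block device
 *	# echo 0 > /sys/bus/rbd/remove		# unmap device id 0
 */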
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
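/*
 * Illustrative sketch (not part of the original driver): how the two
 * saturating helpers above pair up for a reference count that must not
 * be resurrected once it reaches 0.  example_ref_get()/example_ref_put()
 * are hypothetical names.
 */
static inline bool example_ref_get(atomic_t *ref)
{
	/* 0 means the count was already 0 -- the object is going away */
	return atomic_inc_return_safe(ref) > 0;
}

static inline void example_ref_put(atomic_t *ref)
{
	if (atomic_dec_return_safe(ref) < 0)
		pr_warn("example ref underflow\n");
}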
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *            . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
 *            .                 |                                    .
 *            .                 v                                    .
 *            .    RBD_OBJ_WRITE_READ_FROM_PARENT. . .               .
 *            .                 |                    .               .
 *            .                 v                    v (deep-copyup  .
 *  (image    .   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC   .  not needed)  .
 * flattened) .                 |                    .               .
 *            .                 v                    .               .
 *            . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . .  (copyup      .
 *                              |                        not needed) .
 *                              v                                    .
 *                            done . . . . . . . . . . . . . . . . . .
 *                              ^
 *                              |
 *                     RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_READ_FROM_PARENT,
	RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC,
	RBD_OBJ_WRITE_COPYUP_OPS,
};
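/*
 * Illustrative walk-through (a sketch, not exhaustive): a guarded write
 * to an object that doesn't exist yet but falls within the parent
 * overlap typically moves GUARD -> READ_FROM_PARENT -> COPYUP_OPS ->
 * done; COPYUP_EMPTY_SNAPC is visited first when a deep-copyup with an
 * empty snapshot context is required.  A write needing no guard at all
 * goes straight from FLAT to done.
 */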
struct rbd_obj_request {
	struct ceph_object_extent ex;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};

	spinlock_t		completion_lock;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	struct list_head	object_extents;	/* obj_req.ex structs */
	u32			pending_count;

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};
struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};
/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
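/*
 * Worked example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, dev_id 3 maps
 * to minor 48, and minors 48..63 belong to that device -- i.e. each
 * device gets 2^4 = 16 minors for its partitions in single-major mode.
 */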
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);
static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);
static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				     u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_alloc_size, "alloc_size=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true
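/*
 * Example (hypothetical values): a per-device option string handed to
 * parse_rbd_opts_token() below might look like
 *
 *	queue_depth=128,alloc_size=65536,lock_timeout=30,read_only
 *
 * Integer options are range-checked in the parser; boolean options
 * simply flip the corresponding rbd_options field from its default.
 */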
struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};
static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_alloc_size:
		if (intval < SECTOR_SIZE) {
			pr_err("alloc_size out of range\n");
			return -EINVAL;
		}
		if (!is_power_of_2(intval)) {
			pr_err("alloc_size must be a power of 2\n");
			return -EINVAL;
		}
		pctx->opts->alloc_size = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
				     client->options->mount_timeout);
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = wait_for_latest_osdmap(rbdc->client);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire the snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is in kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
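/*
 * Worked example (hypothetical ids): for snapc->snaps == { 40, 25, 10 }
 * (descending, newest first), rbd_dev_snap_index() returns 1 for
 * snap_id 25 and BAD_SNAP_INDEX for snap_id 30, which is not present.
 */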
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}
static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}
/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}
static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
	     kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	img_request->pending_count++;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}
static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
	     obj_request->ex.oe_len, osd_req);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never change thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}
static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}
/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (!obj_req->num_img_extents ||
	    (rbd_obj_is_entire(obj_req) &&
	     !obj_req->img_request->snapc->num_snaps))
		return false;

	return true;
}
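/*
 * Example: a write covering the whole object with no snapshots in the
 * context needs no copyup -- any parent data would be overwritten in
 * full anyway.  A partial write over the parent overlap does, since the
 * rest of the object must first be populated from the parent.
 */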
static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}
static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}
static struct ceph_osd_request *
__rbd_osd_req_create(struct rbd_obj_request *obj_req,
		     struct ceph_snap_context *snapc, unsigned int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

static struct ceph_osd_request *
rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
{
	return __rbd_osd_req_create(obj_req, obj_req->img_request->snapc,
				    num_ops);
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		rbd_assert(0);
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */
static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->object_extents);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}
static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		rbd_assert(0);
	}
}
static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
	obj_req->osd_req = __rbd_osd_req_create(obj_req, NULL, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, 0);

	rbd_osd_req_format_read(obj_req);
	return 0;
}
static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
				unsigned int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}
static int count_write_ops(struct rbd_obj_request *obj_req)
{
	return 2; /* setallochint + write/writefull */
}
static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
				  unsigned int which)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
				   rbd_dev->layout.object_size,
				   rbd_dev->layout.object_size);

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(obj_req->osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, which++);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}
static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	bool need_guard;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	need_guard = rbd_obj_copyup_enabled(obj_req);
	num_osd_ops = need_guard + count_write_ops(obj_req);

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (need_guard) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;

		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
	}

	__rbd_obj_setup_write(obj_req, which);
	return 0;
}
static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}
static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off = obj_req->ex.oe_off;
	u64 next_off = obj_req->ex.oe_off + obj_req->ex.oe_len;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(off, rbd_dev->opts->alloc_size);
		next_off = round_down(next_off, rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		osd_req_op_init(obj_req->osd_req, 0, CEPH_OSD_OP_DELETE, 0);
	} else {
		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		osd_req_op_extent_init(obj_req->osd_req, 0,
				       truncate_or_zero_opcode(obj_req),
				       off, next_off - off, 0, 0);
	}

	obj_req->write_state = RBD_OBJ_WRITE_FLAT;
	rbd_osd_req_format_write(obj_req);
	return 0;
}
static int count_zeroout_ops(struct rbd_obj_request *obj_req)
{
	int num_osd_ops;

	if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
	    !rbd_obj_copyup_enabled(obj_req))
		num_osd_ops = 2; /* create + truncate */
	else
		num_osd_ops = 1; /* delete/truncate/zero */

	return num_osd_ops;
}
static void __rbd_obj_setup_zeroout(struct rbd_obj_request *obj_req,
				    unsigned int which)
{
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			if (!rbd_obj_copyup_enabled(obj_req))
				osd_req_op_init(obj_req->osd_req, which++,
						CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else {
		opcode = truncate_or_zero_opcode(obj_req);
	}

	if (opcode)
		osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}
static int rbd_obj_setup_zeroout(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	bool need_guard;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	need_guard = rbd_obj_copyup_enabled(obj_req);
	num_osd_ops = need_guard + count_zeroout_ops(obj_req);

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (need_guard) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;

		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
	}

	__rbd_obj_setup_zeroout(obj_req, which);
	return 0;
}
/*
 * For each object request in @img_req, allocate an OSD request, add
 * individual OSD ops and prepare them for submission.  The number of
 * OSD ops depends on op_type and the overlap point (if any).
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req, *next_obj_req;
	int ret;

	for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_setup_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_setup_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_setup_discard(obj_req);
			break;
		case OBJ_OP_ZEROOUT:
			ret = rbd_obj_setup_zeroout(obj_req);
			break;
		default:
			rbd_assert(0);
		}
		if (ret < 0)
			return ret;
		if (ret > 0) {
			img_req->xferred += obj_req->ex.oe_len;
			img_req->pending_count--;
			rbd_img_obj_request_del(img_req, obj_req);
			continue;
		}

		ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
		if (ret)
			return ret;
	}

	return 0;
}
union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};
static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}
/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
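/*
 * Example (hypothetical layout): with stripe_unit = 64K, stripe_count = 4
 * and object_size = 4M, consecutive 64K chunks of a file extent map to
 * four objects in round-robin fashion, so one object request's data may
 * come from non-contiguous chunks of the source buffer -- hence the
 * count/copy path in rbd_img_fill_request() below.
 */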
static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy;
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}
static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}
static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}
static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}
static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}
static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}
static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}
static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}
static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}
2327 static int __rbd_img_fill_from_bvecs(struct rbd_img_request
*img_req
,
2328 struct ceph_file_extent
*img_extents
,
2329 u32 num_img_extents
,
2330 struct ceph_bvec_iter
*bvec_pos
)
2332 struct rbd_img_fill_ctx fctx
= {
2333 .pos_type
= OBJ_REQUEST_BVECS
,
2334 .pos
= (union rbd_img_fill_iter
*)bvec_pos
,
2335 .set_pos_fn
= set_bvec_pos
,
2336 .count_fn
= count_bvecs
,
2337 .copy_fn
= copy_bvecs
,
2340 return rbd_img_fill_request(img_req
, img_extents
, num_img_extents
,
2344 static int rbd_img_fill_from_bvecs(struct rbd_img_request
*img_req
,
2345 struct ceph_file_extent
*img_extents
,
2346 u32 num_img_extents
,
2347 struct bio_vec
*bvecs
)
2349 struct ceph_bvec_iter it
= {
2351 .iter
= { .bi_size
= ceph_file_extents_bytes(img_extents
,
2355 return __rbd_img_fill_from_bvecs(img_req
, img_extents
, num_img_extents
,
static void rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request(img_request, obj_request)
		rbd_obj_request_submit(obj_request);

	rbd_img_request_put(img_request);
}
static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
	if (!child_img_req)
		return -ENOMEM;

	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			BUG();
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	rbd_img_request_submit(child_img_req);
	return 0;
}
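/*
 * Note: a read from the parent is modelled as a nested image request
 * against the parent device -- IMG_REQ_CHILD ties the child request
 * back to the originating object request, and the child reuses that
 * object request's data descriptor (bio position, bvec position or
 * copyup bvecs) so the parent data lands exactly where the object
 * read would have.
 */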
static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (obj_req->result == -ENOENT &&
	    rbd_dev->parent_overlap && !obj_req->tried_parent) {
		/* reverse map this object extent onto the parent */
		ret = rbd_obj_calc_img_extents(obj_req, false);
		if (ret) {
			obj_req->result = ret;
			return true;
		}

		if (obj_req->num_img_extents) {
			obj_req->tried_parent = true;
			ret = rbd_obj_read_from_parent(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
	}

	/*
	 * -ENOENT means a hole in the image -- zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  In both cases we update xferred
	 * count to indicate the whole request was satisfied.
	 */
	if (obj_req->result == -ENOENT ||
	    (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
		rbd_assert(!obj_req->xferred || !obj_req->result);
		rbd_obj_zero_range(obj_req, obj_req->xferred,
				   obj_req->ex.oe_len - obj_req->xferred);
		obj_req->result = 0;
		obj_req->xferred = obj_req->ex.oe_len;
	}

	return true;
}
/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}
#define MODS_ONLY	U32_MAX

static int rbd_obj_issue_copyup_empty_snapc(struct rbd_obj_request *obj_req,
					    u32 bytes)
{
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
	rbd_assert(bytes > 0 && bytes != MODS_ONLY);
	rbd_osd_req_destroy(obj_req->osd_req);

	obj_req->osd_req = __rbd_osd_req_create(obj_req, &rbd_empty_snapc, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
					  obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count,
					  bytes);
	rbd_osd_req_format_write(obj_req);

	ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_obj_request_submit(obj_req);
	return 0;
}
static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	unsigned int num_osd_ops = (bytes != MODS_ONLY);
	unsigned int which = 0;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT ||
		   obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_CALL);
	rbd_osd_req_destroy(obj_req->osd_req);

	switch (img_req->op_type) {
	case OBJ_OP_WRITE:
		num_osd_ops += count_write_ops(obj_req);
		break;
	case OBJ_OP_ZEROOUT:
		num_osd_ops += count_zeroout_ops(obj_req);
		break;
	default:
		BUG();
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (bytes != MODS_ONLY) {
		ret = osd_req_op_cls_init(obj_req->osd_req, which, "rbd",
					  "copyup");
		if (ret)
			return ret;

		osd_req_op_cls_request_data_bvecs(obj_req->osd_req, which++,
						  obj_req->copyup_bvecs,
						  obj_req->copyup_bvec_count,
						  bytes);
	}

	switch (img_req->op_type) {
	case OBJ_OP_WRITE:
		__rbd_obj_setup_write(obj_req, which);
		break;
	case OBJ_OP_ZEROOUT:
		__rbd_obj_setup_zeroout(obj_req, which);
		break;
	default:
		BUG();
	}

	ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_obj_request_submit(obj_req);
	return 0;
}
static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
{
	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
		dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
		bytes = 0;
	}

	if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
		/*
		 * Send a copyup request with an empty snapshot context to
		 * deep-copyup the object through all existing snapshots.
		 * A second request with the current snapshot context will be
		 * sent for the actual modification.
		 */
		obj_req->write_state = RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC;
		return rbd_obj_issue_copyup_empty_snapc(obj_req, bytes);
	}

	obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS;
	return rbd_obj_issue_copyup_ops(obj_req, bytes);
}
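/*
 * Note: in the snapshot case the copyup is therefore issued as two OSD
 * requests, e.g. for an object with non-zero parent overlap:
 *
 *   1) copyup of the parent data with an empty snapshot context
 *      (RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC), and once that completes
 *   2) the modification ops with the current snapshot context
 *      (rbd_obj_issue_copyup_ops() with MODS_ONLY).
 *
 * Without snapshots, a single request carries both copyup and mods.
 */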
static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Re-submit the original write
		 * request -- pass MODS_ONLY since the copyup isn't needed
		 * anymore.
		 */
		obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS;
		return rbd_obj_issue_copyup_ops(obj_req, MODS_ONLY);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	obj_req->write_state = RBD_OBJ_WRITE_READ_FROM_PARENT;
	return rbd_obj_read_from_parent(obj_req);
}
static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
{
	int ret;

	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_GUARD:
		rbd_assert(!obj_req->xferred);
		if (obj_req->result == -ENOENT) {
			/*
			 * The target object doesn't exist.  Read the data for
			 * the entire target object up to the overlap point (if
			 * any) from the parent, so we can use it for a copyup.
			 */
			ret = rbd_obj_handle_write_guard(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
		/* fall through */
	case RBD_OBJ_WRITE_FLAT:
	case RBD_OBJ_WRITE_COPYUP_OPS:
		if (!obj_req->result)
			/*
			 * There is no such thing as a successful short
			 * write -- indicate the whole request was satisfied.
			 */
			obj_req->xferred = obj_req->ex.oe_len;
		return true;
	case RBD_OBJ_WRITE_READ_FROM_PARENT:
		if (obj_req->result)
			return true;

		rbd_assert(obj_req->xferred);
		ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
		if (ret) {
			obj_req->result = ret;
			obj_req->xferred = 0;
			return true;
		}
		return false;
	case RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC:
		if (obj_req->result)
			return true;

		obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS;
		ret = rbd_obj_issue_copyup_ops(obj_req, MODS_ONLY);
		if (ret) {
			obj_req->result = ret;
			return true;
		}
		return false;
	default:
		BUG();
	}
}
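/*
 * Note: the write state transitions handled above, assuming no errors:
 *
 *   WRITE_FLAT -----------------------------------------------> done
 *   WRITE_GUARD (-ENOENT) -> READ_FROM_PARENT -> COPYUP_OPS ---> done
 *   WRITE_GUARD (-ENOENT, snapshots exist)
 *       -> READ_FROM_PARENT -> COPYUP_EMPTY_SNAPC -> COPYUP_OPS
 *
 * Returning false keeps the object request alive for the next
 * completion callback; returning true completes it.
 */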
/*
 * Returns true if @obj_req is completed, or false otherwise.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	switch (obj_req->img_request->op_type) {
	case OBJ_OP_READ:
		return rbd_obj_handle_read(obj_req);
	case OBJ_OP_WRITE:
		return rbd_obj_handle_write(obj_req);
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		if (rbd_obj_handle_write(obj_req)) {
			/*
			 * Hide -ENOENT from delete/truncate/zero -- discarding
			 * a non-existent object is not a problem.
			 */
			if (obj_req->result == -ENOENT) {
				obj_req->result = 0;
				obj_req->xferred = obj_req->ex.oe_len;
			}
			return true;
		}
		return false;
	default:
		BUG();
	}
}
static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	rbd_assert((!obj_req->result &&
		    obj_req->xferred == obj_req->ex.oe_len) ||
		   (obj_req->result < 0 && !obj_req->xferred));
	if (!obj_req->result) {
		img_req->xferred += obj_req->xferred;
		return;
	}

	rbd_warn(img_req->rbd_dev,
		 "%s at objno %llu %llu~%llu result %d xferred %llu",
		 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
		 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
		 obj_req->xferred);
	if (!img_req->result) {
		img_req->result = obj_req->result;
		img_req->xferred = 0;
	}
}
static void rbd_img_end_child_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req = img_req->obj_request;

	rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
	rbd_assert((!img_req->result &&
		    img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
		   (img_req->result < 0 && !img_req->xferred));

	obj_req->result = img_req->result;
	obj_req->xferred = img_req->xferred;
	rbd_img_request_put(img_req);
}
static void rbd_img_end_request(struct rbd_img_request *img_req)
{
	rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
	rbd_assert((!img_req->result &&
		    img_req->xferred == blk_rq_bytes(img_req->rq)) ||
		   (img_req->result < 0 && !img_req->xferred));

	blk_mq_end_request(img_req->rq,
			   errno_to_blk_status(img_req->result));
	rbd_img_request_put(img_req);
}
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req;

again:
	if (!__rbd_obj_handle_request(obj_req))
		return;

	img_req = obj_req->img_request;
	spin_lock(&img_req->completion_lock);
	rbd_obj_end_request(obj_req);
	rbd_assert(img_req->pending_count);
	if (--img_req->pending_count) {
		spin_unlock(&img_req->completion_lock);
		return;
	}

	spin_unlock(&img_req->completion_lock);
	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
		obj_req = img_req->obj_request;
		rbd_img_end_child_request(img_req);
		goto again;
	}
	rbd_img_end_request(img_req);
}
static const struct rbd_client_id rbd_empty_cid;

static bool rbd_cid_equal(const struct rbd_client_id *lhs,
			  const struct rbd_client_id *rhs)
{
	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}

static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
	struct rbd_client_id cid;

	mutex_lock(&rbd_dev->watch_mutex);
	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
	cid.handle = rbd_dev->watch_cookie;
	mutex_unlock(&rbd_dev->watch_mutex);
	return cid;
}
/*
 * lock_rwsem must be held for write
 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
			      const struct rbd_client_id *cid)
{
	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
	     cid->gid, cid->handle);
	rbd_dev->owner_cid = *cid;	/* struct */
}
static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
	mutex_lock(&rbd_dev->watch_mutex);
	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
	mutex_unlock(&rbd_dev->watch_mutex);
}
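/*
 * Note: the cookie embeds the current watch id, so it changes whenever
 * the watch is re-established (e.g. "<RBD_LOCK_COOKIE_PREFIX> 140978",
 * the exact prefix string being defined elsewhere in this file).
 * rbd_reacquire_lock() relies on this to update the cookie in place
 * after a watch error.
 */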
static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);

	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}
/*
 * lock_rwsem must be held for write
 */
static int rbd_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] != '\0');

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
			    RBD_LOCK_TAG, "", 0);
	if (ret)
		return ret;

	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
	__rbd_lock(rbd_dev, cookie);
	return 0;
}
/*
 * lock_rwsem must be held for write
 */
static void rbd_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] == '\0');

	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			      RBD_LOCK_NAME, rbd_dev->lock_cookie);
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock: %d", ret);

	/* treat errors as the image is unlocked */
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	rbd_dev->lock_cookie[0] = '\0';
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
}
static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
				enum rbd_notify_op notify_op,
				struct page ***preply_pages,
				size_t *preply_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
	char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	void *p = buf;

	dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);

	/* encode *LockPayload NotifyMessage (op + ClientId) */
	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
	ceph_encode_32(&p, notify_op);
	ceph_encode_64(&p, cid.gid);
	ceph_encode_64(&p, cid.handle);

	return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, buf, buf_size,
				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
}
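/*
 * Note: the NotifyMessage payload built above is, byte for byte:
 *
 *   struct_v (1) | struct_compat (1) | struct_len (4)	<- start block
 *   notify_op (4) | gid (8) | handle (8)		<- *LockPayload
 *
 * which is why buf[] is sized 4 + 8 + 8 plus
 * CEPH_ENCODING_START_BLK_LEN for the encoding header.
 */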
static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
			       enum rbd_notify_op notify_op)
{
	struct page **reply_pages;
	size_t reply_len;

	__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
}
static void rbd_notify_acquired_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  acquired_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}

static void rbd_notify_released_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  released_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}
static int rbd_request_lock(struct rbd_device *rbd_dev)
{
	struct page **reply_pages;
	size_t reply_len;
	bool lock_owner_responded = false;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
				   &reply_pages, &reply_len);
	if (ret && ret != -ETIMEDOUT) {
		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
		goto out;
	}

	if (reply_len > 0 && reply_len <= PAGE_SIZE) {
		void *p = page_address(reply_pages[0]);
		void *const end = p + reply_len;
		u32 n;

		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
		while (n--) {
			u8 struct_v;
			u32 len;

			ceph_decode_need(&p, end, 8 + 8, e_inval);
			p += 8 + 8; /* skip gid and cookie */

			ceph_decode_32_safe(&p, end, len, e_inval);
			if (!len)
				continue;

			if (lock_owner_responded) {
				rbd_warn(rbd_dev,
					 "duplicate lock owners detected");
				ret = -EIO;
				goto out;
			}

			lock_owner_responded = true;
			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
						  &struct_v, &len);
			if (ret) {
				rbd_warn(rbd_dev,
					 "failed to decode ResponseMessage: %d",
					 ret);
				goto e_inval;
			}

			ret = ceph_decode_32(&p);
		}
	}

	if (!lock_owner_responded) {
		rbd_warn(rbd_dev, "no lock owners detected");
		ret = -ETIMEDOUT;
	}

out:
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
{
	dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);

	cancel_delayed_work(&rbd_dev->lock_dwork);
	if (wake_all)
		wake_up_all(&rbd_dev->lock_waitq);
	else
		wake_up(&rbd_dev->lock_waitq);
}
static int get_lock_owner_info(struct rbd_device *rbd_dev,
			       struct ceph_locker **lockers, u32 *num_lockers)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	u8 lock_type;
	char *lock_tag;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
				 &lock_type, &lock_tag, lockers, num_lockers);
	if (ret)
		return ret;

	if (*num_lockers == 0) {
		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
		goto out;
	}

	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
			 lock_tag);
		ret = -EBUSY;
		goto out;
	}

	if (lock_type == CEPH_CLS_LOCK_SHARED) {
		rbd_warn(rbd_dev, "shared lock type detected");
		ret = -EBUSY;
		goto out;
	}

	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
			 (*lockers)[0].id.cookie);
		ret = -EBUSY;
		goto out;
	}

out:
	kfree(lock_tag);
	return ret;
}
static int find_watcher(struct rbd_device *rbd_dev,
			const struct ceph_locker *locker)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u64 cookie;
	int i;
	int ret;

	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
				      &rbd_dev->header_oloc, &watchers,
				      &num_watchers);
	if (ret)
		return ret;

	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
	for (i = 0; i < num_watchers; i++) {
		if (!memcmp(&watchers[i].addr, &locker->info.addr,
			    sizeof(locker->info.addr)) &&
		    watchers[i].cookie == cookie) {
			struct rbd_client_id cid = {
				.gid = le64_to_cpu(watchers[i].name.num),
				.handle = cookie,
			};

			dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
			     rbd_dev, cid.gid, cid.handle);
			rbd_set_owner_cid(rbd_dev, &cid);
			ret = 1;
			goto out;
		}
	}

	dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
	ret = 0;
out:
	kfree(watchers);
	return ret;
}
/*
 * lock_rwsem must be held for write
 */
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
	struct ceph_client *client = rbd_dev->rbd_client->client;
	struct ceph_locker *lockers;
	u32 num_lockers;
	int ret;

	for (;;) {
		ret = rbd_lock(rbd_dev);
		if (ret != -EBUSY)
			return ret;

		/* determine if the current lock holder is still alive */
		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
		if (ret)
			return ret;

		if (num_lockers == 0)
			goto again;

		ret = find_watcher(rbd_dev, lockers);
		if (ret) {
			if (ret > 0)
				ret = 0; /* have to request lock */
			goto out;
		}

		rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
			 ENTITY_NAME(lockers[0].id.name));

		ret = ceph_monc_blacklist_add(&client->monc,
					      &lockers[0].info.addr);
		if (ret) {
			rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
				 ENTITY_NAME(lockers[0].id.name), ret);
			goto out;
		}

		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
					  lockers[0].id.cookie,
					  &lockers[0].id.name);
		if (ret && ret != -ENOENT)
			goto out;

again:
		ceph_free_lockers(lockers, num_lockers);
	}

out:
	ceph_free_lockers(lockers, num_lockers);
	return ret;
}
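/*
 * Note: taking over a dead client's lock is a three-step sequence --
 * confirm via the watcher list that the lock holder no longer has a
 * watch on the header object (find_watcher()), blacklist its address
 * so it cannot come back and write against stale state, and only then
 * break the lock and retry ceph_cls_lock().
 */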
/*
 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
 */
static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
						int *pret)
{
	enum rbd_lock_state lock_state;

	down_read(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (__rbd_is_lock_owner(rbd_dev)) {
		lock_state = rbd_dev->lock_state;
		up_read(&rbd_dev->lock_rwsem);
		return lock_state;
	}

	up_read(&rbd_dev->lock_rwsem);
	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (!__rbd_is_lock_owner(rbd_dev)) {
		*pret = rbd_try_lock(rbd_dev);
		if (*pret)
			rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
	}

	lock_state = rbd_dev->lock_state;
	up_write(&rbd_dev->lock_rwsem);
	return lock_state;
}
static void rbd_acquire_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, lock_dwork);
	enum rbd_lock_state lock_state;
	int ret = 0;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
	lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
	if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
		if (lock_state == RBD_LOCK_STATE_LOCKED)
			wake_requests(rbd_dev, true);
		dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
		     rbd_dev, lock_state, ret);
		return;
	}

	ret = rbd_request_lock(rbd_dev);
	if (ret == -ETIMEDOUT) {
		goto again; /* treat this as a dead client */
	} else if (ret == -EROFS) {
		rbd_warn(rbd_dev, "peer will not release lock");
		/*
		 * If this is rbd_add_acquire_lock(), we want to fail
		 * immediately -- reuse BLACKLISTED flag.  Otherwise we
		 * want to block.
		 */
		if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
			set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			/* wake "rbd map --exclusive" process */
			wake_requests(rbd_dev, false);
		}
	} else if (ret < 0) {
		rbd_warn(rbd_dev, "error requesting lock: %d", ret);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
				 RBD_RETRY_DELAY);
	} else {
		/*
		 * lock owner acked, but resend if we don't see them
		 * release the lock
		 */
		dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
		     rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
	}
}
/*
 * lock_rwsem must be held for write
 */
static bool rbd_release_lock(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
		return false;

	rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
	downgrade_write(&rbd_dev->lock_rwsem);
	/*
	 * Ensure that all in-flight IO is flushed.
	 *
	 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
	 * may be shared with other devices.
	 */
	ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
	up_read(&rbd_dev->lock_rwsem);

	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
		return false;

	rbd_unlock(rbd_dev);
	/*
	 * Give others a chance to grab the lock - we would re-acquire
	 * almost immediately if we got new IO during ceph_osdc_sync()
	 * otherwise.  We need to ack our own notifications, so this
	 * lock_dwork will be requeued from rbd_wait_state_locked()
	 * after wake_requests() in rbd_handle_released_lock().
	 */
	cancel_delayed_work(&rbd_dev->lock_dwork);
	return true;
}
static void rbd_release_lock_work(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  unlock_work);

	down_write(&rbd_dev->lock_rwsem);
	rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}
static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			/*
			 * we already know that the remote client is
			 * the owner
			 */
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	if (!__rbd_is_lock_owner(rbd_dev))
		wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}
static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
			     __func__, rbd_dev, cid.gid, cid.handle,
			     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	if (!__rbd_is_lock_owner(rbd_dev))
		wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}
/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
				   void **p)
{
	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
	struct rbd_client_id cid = { 0 };
	int result = 1;

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (rbd_cid_equal(&cid, &my_cid))
		return result;

	down_read(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev)) {
		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
			goto out_unlock;

		/*
		 * encode ResponseMessage(0) so the peer can detect
		 * a missing owner
		 */
		result = 0;

		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
			if (!rbd_dev->opts->exclusive) {
				dout("%s rbd_dev %p queueing unlock_work\n",
				     __func__, rbd_dev);
				queue_work(rbd_dev->task_wq,
					   &rbd_dev->unlock_work);
			} else {
				/* refuse to release the lock */
				result = -EROFS;
			}
		}
	}

out_unlock:
	up_read(&rbd_dev->lock_rwsem);
	return result;
}
static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
				     u64 notify_id, u64 cookie, s32 *result)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char buf[4 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	int ret;

	if (result) {
		void *p = buf;

		/* encode ResponseMessage */
		ceph_start_encoding(&p, 1, 1,
				    buf_size - CEPH_ENCODING_START_BLK_LEN);
		ceph_encode_32(&p, *result);
	} else {
		buf_size = 0;
	}

	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
				   &rbd_dev->header_oloc, notify_id, cookie,
				   buf, buf_size);
	if (ret)
		rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
}
static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
				   u64 cookie)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
}

static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
					  u64 notify_id, u64 cookie, s32 result)
{
	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
}
static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
			 u64 notifier_id, void *data, size_t data_len)
{
	struct rbd_device *rbd_dev = arg;
	void *p = data;
	void *const end = p + data_len;
	u8 struct_v = 0;
	u32 len;
	u32 notify_op;
	int ret;

	dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
	     __func__, rbd_dev, cookie, notify_id, data_len);
	if (data_len) {
		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
					  &struct_v, &len);
		if (ret) {
			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
				 ret);
			return;
		}

		notify_op = ceph_decode_32(&p);
	} else {
		/* legacy notification for header updates */
		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
		len = 0;
	}

	dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
	switch (notify_op) {
	case RBD_NOTIFY_OP_ACQUIRED_LOCK:
		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_RELEASED_LOCK:
		rbd_handle_released_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_REQUEST_LOCK:
		ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
		if (ret <= 0)
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, ret);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_HEADER_UPDATE:
		ret = rbd_dev_refresh(rbd_dev);
		if (ret)
			rbd_warn(rbd_dev, "refresh failed: %d", ret);

		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	default:
		if (rbd_is_lock_owner(rbd_dev))
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, -EOPNOTSUPP);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	}
}
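/*
 * Note: every notify is acknowledged exactly once -- lock related ops
 * ack after updating the owner cid, header updates ack after the
 * refresh, and unknown ops are acked too (with -EOPNOTSUPP if we own
 * the lock) so the notifier does not have to wait out
 * RBD_NOTIFY_TIMEOUT on us.
 */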
static void __rbd_unregister_watch(struct rbd_device *rbd_dev);

static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
	struct rbd_device *rbd_dev = arg;

	rbd_warn(rbd_dev, "encountered watch error: %d", err);

	down_write(&rbd_dev->lock_rwsem);
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	up_write(&rbd_dev->lock_rwsem);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
		__rbd_unregister_watch(rbd_dev);
		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;

		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
	}
	mutex_unlock(&rbd_dev->watch_mutex);
}
/*
 * watch_mutex must be locked
 */
static int __rbd_register_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_linger_request *handle;

	rbd_assert(!rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, rbd_watch_cb,
				 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	rbd_dev->watch_handle = handle;
	return 0;
}
/*
 * watch_mutex must be locked
 */
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	rbd_assert(rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);

	rbd_dev->watch_handle = NULL;
}
static int rbd_register_watch(struct rbd_device *rbd_dev)
{
	int ret;

	mutex_lock(&rbd_dev->watch_mutex);
	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
	ret = __rbd_register_watch(rbd_dev);
	if (ret)
		goto out;

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;

out:
	mutex_unlock(&rbd_dev->watch_mutex);
	return ret;
}
static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	cancel_work_sync(&rbd_dev->acquired_lock_work);
	cancel_work_sync(&rbd_dev->released_lock_work);
	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
	cancel_work_sync(&rbd_dev->unlock_work);
}

static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
	cancel_tasks_sync(rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
		__rbd_unregister_watch(rbd_dev);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	mutex_unlock(&rbd_dev->watch_mutex);

	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
/*
 * lock_rwsem must be held for write
 */
static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, RBD_LOCK_NAME,
				  CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
				  RBD_LOCK_TAG, cookie);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
				 ret);

		/*
		 * Lock cookie cannot be updated on older OSDs, so do
		 * a manual release and queue an acquire.
		 */
		if (rbd_release_lock(rbd_dev))
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->lock_dwork, 0);
	} else {
		__rbd_lock(rbd_dev, cookie);
	}
}
static void rbd_reregister_watch(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, watch_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	ret = __rbd_register_watch(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
		if (ret == -EBLACKLISTED || ret == -ENOENT) {
			set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			wake_requests(rbd_dev, true);
		} else {
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
		}
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
	mutex_unlock(&rbd_dev->watch_mutex);

	down_write(&rbd_dev->lock_rwsem);
	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		rbd_reacquire_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			       struct ceph_object_id *oid,
			       struct ceph_object_locator *oloc,
			       const char *method_name,
			       const void *outbound,
			       size_t outbound_size,
			       void *inbound,
			       size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct page *req_page = NULL;
	struct page *reply_page;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	if (outbound) {
		if (outbound_size > PAGE_SIZE)
			return -E2BIG;

		req_page = alloc_page(GFP_KERNEL);
		if (!req_page)
			return -ENOMEM;

		memcpy(page_address(req_page), outbound, outbound_size);
	}

	reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		if (req_page)
			__free_page(req_page);
		return -ENOMEM;
	}

	ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
			     CEPH_OSD_FLAG_READ, req_page, outbound_size,
			     reply_page, &inbound_size);
	if (!ret) {
		memcpy(inbound, page_address(reply_page), inbound_size);
		ret = inbound_size;
	}

	if (req_page)
		__free_page(req_page);
	__free_page(reply_page);
	return ret;
}
/*
 * lock_rwsem must be held for read
 */
static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
{
	DEFINE_WAIT(wait);
	unsigned long timeout;
	int ret = 0;

	if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
		return -EBLACKLISTED;

	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		return 0;

	if (!may_acquire) {
		rbd_warn(rbd_dev, "exclusive lock required");
		return -EROFS;
	}

	do {
		/*
		 * Note the use of mod_delayed_work() in rbd_acquire_lock()
		 * and cancel_delayed_work() in wake_requests().
		 */
		dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
		prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
					  TASK_UNINTERRUPTIBLE);
		up_read(&rbd_dev->lock_rwsem);
		timeout = schedule_timeout(ceph_timeout_jiffies(
						rbd_dev->opts->lock_timeout));
		down_read(&rbd_dev->lock_rwsem);
		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
			ret = -EBLACKLISTED;
			break;
		}
		if (!timeout) {
			rbd_warn(rbd_dev, "timed out waiting for lock");
			ret = -ETIMEDOUT;
			break;
		}
	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);

	finish_wait(&rbd_dev->lock_waitq, &wait);
	return ret;
}
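/*
 * Note: writers (and readers with lock_on_read) block here until
 * rbd_acquire_lock() wins the exclusive lock and wakes the queue via
 * wake_requests().  The wait is bounded by opts->lock_timeout and is
 * abandoned immediately if the client gets blacklisted.
 */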
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	bool must_be_locked;
	int result;

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		op_type = OBJ_OP_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		op_type = OBJ_OP_ZEROOUT;
		break;
	case REQ_OP_WRITE:
		op_type = OBJ_OP_WRITE;
		break;
	case REQ_OP_READ:
		op_type = OBJ_OP_READ;
		break;
	default:
		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
		result = -EIO;
		goto err;
	}

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	rbd_assert(op_type == OBJ_OP_READ ||
		   rbd_dev->spec->snap_id == CEPH_NOSNAP);

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	must_be_locked =
	    (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	    (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
	if (must_be_locked) {
		down_read(&rbd_dev->lock_rwsem);
		result = rbd_wait_state_locked(rbd_dev,
					       !rbd_dev->opts->exclusive);
		if (result)
			goto err_unlock;
	}

	img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_unlock;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
		result = rbd_img_fill_nodata(img_request, offset, length);
	else
		result = rbd_img_fill_from_bio(img_request, offset, length,
					       rq->bio);
	if (result || !img_request->pending_count)
		goto err_img_request;

	rbd_img_request_submit(img_request);
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
	return;

err_img_request:
	rbd_img_request_put(img_request);
err_unlock:
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, errno_to_blk_status(result));
}
static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_STS_OK;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	blk_cleanup_queue(rbd_dev->disk->queue);
	blk_mq_free_tag_set(&rbd_dev->tag_set);
	put_disk(rbd_dev->disk);
	rbd_dev->disk = NULL;
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     void *buf, int buf_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages = calc_pages_for(0, buf_len);
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_req;
	}

	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
					 true);

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0)
		ceph_copy_from_page_vector(pages, buf, 0, ret);

out_req:
	ceph_osdc_put_request(req);
	return ret;
}
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
					&rbd_dev->header_oloc, ondisk, size);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				 size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);
	return ret;
}
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}
static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static const struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.init_request	= rbd_init_request,
};
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	unsigned int objset_bytes =
	    rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, USHRT_MAX);
	blk_queue_max_segment_size(q, UINT_MAX);
	blk_queue_io_min(q, objset_bytes);
	blk_queue_io_opt(q, objset_bytes);

	if (rbd_dev->opts->trim) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.discard_granularity = objset_bytes;
		blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
		blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
	}

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

	/*
	 * disk_release() expects a queue ref from add_disk() and will
	 * put it.  Hold an extra ref until add_disk() is called.
	 */
	WARN_ON(!blk_get_queue(q));
	disk->queue = q;
	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}
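/*
 * Note: the queue limits above all derive from the object set size.
 * For a default-format image (4M objects, stripe_count 1) objset_bytes
 * is 4M, so the maximum I/O size, discard granularity, and the minimum
 * and optimal I/O sizes reported to the block layer are all 4M.
 */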
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct ceph_entity_addr *client_addr =
	    ceph_client_addr(rbd_dev->rbd_client->client);

	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
		       le32_to_cpu(client_addr->nonce));
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}

static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->config_info);
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_pool_ns_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "pool_ns %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->pool_ns ?: "",
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}
static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_addr.attr,
	&dev_attr_client_id.attr,
	&dev_attr_cluster_fsid.attr,
	&dev_attr_config_info.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_pool_ns.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static const struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};
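/*
 * Note: these attributes surface under /sys/bus/rbd/devices/<dev-id>/,
 * e.g. (assuming device id 0 and an image mapped from a pool named
 * "rbd"):
 *
 *   $ cat /sys/bus/rbd/devices/0/pool
 *   rbd
 *   $ echo 1 > /sys/bus/rbd/devices/0/refresh
 */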
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->pool_ns);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
static void rbd_dev_free(struct rbd_device *rbd_dev)
{
	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);

	ceph_oid_destroy(&rbd_dev->header_oid);
	ceph_oloc_destroy(&rbd_dev->header_oloc);
	kfree(rbd_dev->config_info);

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}
static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->header.data_pool_id = CEPH_NOPOOL;
	ceph_oid_init(&rbd_dev->header_oid);
	rbd_dev->header_oloc.pool = spec->pool_id;
	if (spec->pool_ns) {
		WARN_ON(!*spec->pool_ns);
		rbd_dev->header_oloc.pool_ns =
		    ceph_find_or_create_string(spec->pool_ns,
					       strlen(spec->pool_ns));
	}

	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

	init_rwsem(&rbd_dev->lock_rwsem);
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
	init_waitqueue_head(&rbd_dev->lock_waitq);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	return rbd_dev;
}
/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                                         struct rbd_spec *spec,
                                         struct rbd_options *opts)
{
        struct rbd_device *rbd_dev;

        rbd_dev = __rbd_dev_create(rbdc, spec);
        if (!rbd_dev)
                return NULL;

        rbd_dev->opts = opts;

        /* get an id and fill in device name */
        rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
                                         minor_to_rbd_dev_id(1 << MINORBITS),
                                         GFP_KERNEL);
        if (rbd_dev->dev_id < 0)
                goto fail_rbd_dev;

        sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
        rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
                                                   rbd_dev->name);
        if (!rbd_dev->task_wq)
                goto fail_dev_id;

        /* we have a ref from do_rbd_add() */
        __module_get(THIS_MODULE);

        dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
        return rbd_dev;

fail_dev_id:
        ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
        rbd_dev_free(rbd_dev);
        return NULL;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
        if (rbd_dev)
                put_device(&rbd_dev->dev);
}
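/*
 * Naming note: rbd_dev->name is RBD_DRV_NAME plus the id allocated
 * above, so the first mapped device is "rbd0", the next "rbd1", and
 * so on; the block device node (e.g. /dev/rbd0) follows the same
 * scheme.
 */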
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u8 *order, u64 *snap_size)
{
        __le64 snapid = cpu_to_le64(snap_id);
        int ret;
        struct {
                u8 order;
                __le64 size;
        } __attribute__ ((packed)) size_buf = { 0 };

        ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
                                  &rbd_dev->header_oloc, "get_size",
                                  &snapid, sizeof(snapid),
                                  &size_buf, sizeof(size_buf));
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                return ret;
        if (ret < sizeof (size_buf))
                return -ERANGE;

        if (order) {
                *order = size_buf.order;
                dout("  order %u", (unsigned int)*order);
        }
        *snap_size = le64_to_cpu(size_buf.size);

        dout("  snap_id 0x%016llx snap_size = %llu\n",
                (unsigned long long)snap_id,
                (unsigned long long)*snap_size);

        return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
        return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
                                        &rbd_dev->header.obj_order,
                                        &rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
        void *reply_buf;
        int ret;
        void *p;

        reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
        if (!reply_buf)
                return -ENOMEM;

        ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
                                  &rbd_dev->header_oloc, "get_object_prefix",
                                  NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out;

        p = reply_buf;
        rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
                                                p + ret, NULL, GFP_NOIO);
        if (IS_ERR(rbd_dev->header.object_prefix)) {
                ret = PTR_ERR(rbd_dev->header.object_prefix);
                rbd_dev->header.object_prefix = NULL;
        } else {
                ret = 0;
                dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
        }
out:
        kfree(reply_buf);

        return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                                     u64 *snap_features)
{
        __le64 snapid = cpu_to_le64(snap_id);
        struct {
                __le64 features;
                __le64 incompat;
        } __attribute__ ((packed)) features_buf = { 0 };
        u64 unsup;
        int ret;

        ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
                                  &rbd_dev->header_oloc, "get_features",
                                  &snapid, sizeof(snapid),
                                  &features_buf, sizeof(features_buf));
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                return ret;
        if (ret < sizeof (features_buf))
                return -ERANGE;

        unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
        if (unsup) {
                rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
                         unsup);
                return -ENXIO;
        }

        *snap_features = le64_to_cpu(features_buf.features);

        dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
                (unsigned long long)snap_id,
                (unsigned long long)*snap_features,
                (unsigned long long)le64_to_cpu(features_buf.incompat));

        return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
        return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
                                         &rbd_dev->header.features);
}
struct parent_image_info {
        u64             pool_id;
        const char      *pool_ns;
        const char      *image_id;
        u64             snap_id;

        bool            has_overlap;
        u64             overlap;
};

/*
 * The caller is responsible for @pii.
 */
static int decode_parent_image_spec(void **p, void *end,
                                    struct parent_image_info *pii)
{
        u8 struct_v;
        u32 struct_len;
        int ret;

        ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
                                  &struct_v, &struct_len);
        if (ret)
                return ret;

        ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
        pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
        if (IS_ERR(pii->pool_ns)) {
                ret = PTR_ERR(pii->pool_ns);
                pii->pool_ns = NULL;
                return ret;
        }
        pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
        if (IS_ERR(pii->image_id)) {
                ret = PTR_ERR(pii->image_id);
                pii->image_id = NULL;
                return ret;
        }
        ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
        return 0;

e_inval:
        return -EINVAL;
}
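/*
 * A sketch of the encoding consumed by decode_parent_image_spec(),
 * as implied by the decode calls above (ceph encoding, little-endian;
 * strings are length-prefixed with a __le32):
 *
 *   struct version/compat/len   via ceph_start_decoding()
 *   pool_id   (__le64)
 *   pool_ns   (__le32 len + bytes)
 *   image_id  (__le32 len + bytes)
 *   snap_id   (__le64)
 */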
static int __get_parent_info(struct rbd_device *rbd_dev,
                             struct page *req_page,
                             struct page *reply_page,
                             struct parent_image_info *pii)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        size_t reply_len = PAGE_SIZE;
        void *p, *end;
        int ret;

        ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
                             "rbd", "parent_get", CEPH_OSD_FLAG_READ,
                             req_page, sizeof(u64), reply_page, &reply_len);
        if (ret)
                return ret == -EOPNOTSUPP ? 1 : ret;

        p = page_address(reply_page);
        end = p + reply_len;
        ret = decode_parent_image_spec(&p, end, pii);
        if (ret)
                return ret;

        ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
                             "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
                             req_page, sizeof(u64), reply_page, &reply_len);
        if (ret)
                return ret;

        p = page_address(reply_page);
        end = p + reply_len;
        ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
        if (pii->has_overlap)
                ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

        return 0;

e_inval:
        return -EINVAL;
}
/*
 * The caller is responsible for @pii.
 */
static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
                                    struct page *req_page,
                                    struct page *reply_page,
                                    struct parent_image_info *pii)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        size_t reply_len = PAGE_SIZE;
        void *p, *end;
        int ret;

        ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
                             "rbd", "get_parent", CEPH_OSD_FLAG_READ,
                             req_page, sizeof(u64), reply_page, &reply_len);
        if (ret)
                return ret;

        p = page_address(reply_page);
        end = p + reply_len;
        ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
        pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
        if (IS_ERR(pii->image_id)) {
                ret = PTR_ERR(pii->image_id);
                pii->image_id = NULL;
                return ret;
        }
        ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
        pii->has_overlap = true;
        ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

        return 0;

e_inval:
        return -EINVAL;
}
static int get_parent_info(struct rbd_device *rbd_dev,
                           struct parent_image_info *pii)
{
        struct page *req_page, *reply_page;
        void *p;
        int ret;

        req_page = alloc_page(GFP_KERNEL);
        if (!req_page)
                return -ENOMEM;

        reply_page = alloc_page(GFP_KERNEL);
        if (!reply_page) {
                __free_page(req_page);
                return -ENOMEM;
        }

        p = page_address(req_page);
        ceph_encode_64(&p, rbd_dev->spec->snap_id);
        ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
        if (ret > 0)
                ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
                                               pii);

        __free_page(req_page);
        __free_page(reply_page);
        return ret;
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
        struct rbd_spec *parent_spec;
        struct parent_image_info pii = { 0 };
        int ret;

        parent_spec = rbd_spec_alloc();
        if (!parent_spec)
                return -ENOMEM;

        ret = get_parent_info(rbd_dev, &pii);
        if (ret)
                goto out_err;

        dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
             __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
             pii.has_overlap, pii.overlap);

        if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
                /*
                 * Either the parent never existed, or we have
                 * record of it but the image got flattened so it no
                 * longer has a parent.  When the parent of a
                 * layered image disappears we immediately set the
                 * overlap to 0.  The effect of this is that all new
                 * requests will be treated as if the image had no
                 * parent.
                 *
                 * If !pii.has_overlap, the parent image spec is not
                 * applicable.  It's there to avoid duplication in each
                 * snapshot record.
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
                        rbd_dev_parent_put(rbd_dev);
                        pr_info("%s: clone image has been flattened\n",
                                rbd_dev->disk->disk_name);
                }

                goto out;       /* No parent?  No problem. */
        }

        /* The ceph file layout needs to fit pool id in 32 bits */

        ret = -EIO;
        if (pii.pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "parent pool id too large (%llu > %u)",
                        (unsigned long long)pii.pool_id, U32_MAX);
                goto out_err;
        }

        /*
         * The parent won't change (except when the clone is
         * flattened, already handled that).  So we only need to
         * record the parent spec if we have not already done so.
         */
        if (!rbd_dev->parent_spec) {
                parent_spec->pool_id = pii.pool_id;
                if (pii.pool_ns && *pii.pool_ns) {
                        parent_spec->pool_ns = pii.pool_ns;
                        pii.pool_ns = NULL;
                }
                parent_spec->image_id = pii.image_id;
                pii.image_id = NULL;
                parent_spec->snap_id = pii.snap_id;

                rbd_dev->parent_spec = parent_spec;
                parent_spec = NULL;     /* rbd_dev now owns this */
        }

        /*
         * We always update the parent overlap.  If it's zero we issue
         * a warning, as we will proceed as if there was no parent.
         */
        if (!pii.overlap) {
                if (parent_spec) {
                        /* refresh, careful to warn just once */
                        if (rbd_dev->parent_overlap)
                                rbd_warn(rbd_dev,
                                    "clone now standalone (overlap became 0)");
                } else {
                        /* initial probe */
                        rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
                }
        }
        rbd_dev->parent_overlap = pii.overlap;

out:
        ret = 0;
out_err:
        kfree(pii.pool_ns);
        kfree(pii.image_id);
        rbd_spec_put(parent_spec);
        return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
        struct {
                __le64 stripe_unit;
                __le64 stripe_count;
        } __attribute__ ((packed)) striping_info_buf = { 0 };
        size_t size = sizeof (striping_info_buf);
        void *p;
        int ret;

        ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
                                &rbd_dev->header_oloc, "get_stripe_unit_count",
                                NULL, 0, &striping_info_buf, size);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                return ret;
        if (ret < size)
                return -ERANGE;

        p = &striping_info_buf;
        rbd_dev->header.stripe_unit = ceph_decode_64(&p);
        rbd_dev->header.stripe_count = ceph_decode_64(&p);
        return 0;
}
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
        __le64 data_pool_id;
        int ret;

        ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
                                  &rbd_dev->header_oloc, "get_data_pool",
                                  NULL, 0, &data_pool_id,
                                  sizeof(data_pool_id));
        if (ret < 0)
                return ret;
        if (ret < sizeof(data_pool_id))
                return -EBADMSG;

        rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
        WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
        return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
        CEPH_DEFINE_OID_ONSTACK(oid);
        size_t image_id_size;
        char *image_id;
        void *p;
        void *end;
        size_t size;
        void *reply_buf = NULL;
        size_t len = 0;
        char *image_name = NULL;
        int ret;

        rbd_assert(!rbd_dev->spec->image_name);

        len = strlen(rbd_dev->spec->image_id);
        image_id_size = sizeof (__le32) + len;
        image_id = kmalloc(image_id_size, GFP_KERNEL);
        if (!image_id)
                return NULL;

        p = image_id;
        end = image_id + image_id_size;
        ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

        size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
        reply_buf = kmalloc(size, GFP_KERNEL);
        if (!reply_buf)
                goto out;

        ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
        ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
                                  "dir_get_name", image_id, image_id_size,
                                  reply_buf, size);
        if (ret < 0)
                goto out;
        p = reply_buf;
        end = reply_buf + ret;

        image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
        if (IS_ERR(image_name))
                image_name = NULL;
        else
                dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
        kfree(reply_buf);
        kfree(image_id);

        return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        const char *snap_name;
        u32 which = 0;

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which < snapc->num_snaps) {
                if (!strcmp(name, snap_name))
                        return snapc->snaps[which];
                snap_name += strlen(snap_name) + 1;
                which++;
        }
        return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u32 which;
        bool found = false;
        u64 snap_id;

        for (which = 0; !found && which < snapc->num_snaps; which++) {
                const char *snap_name;

                snap_id = snapc->snaps[which];
                snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
                if (IS_ERR(snap_name)) {
                        /* ignore no-longer existing snapshots */
                        if (PTR_ERR(snap_name) == -ENOENT)
                                continue;
                        else
                                break;
                }
                found = !strcmp(name, snap_name);
                kfree(snap_name);
        }
        return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
        if (rbd_dev->image_format == 1)
                return rbd_v1_snap_id_by_name(rbd_dev, name);

        return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
        struct rbd_spec *spec = rbd_dev->spec;

        rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
        rbd_assert(spec->image_id && spec->image_name);
        rbd_assert(spec->snap_name);

        if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
                u64 snap_id;

                snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
                if (snap_id == CEPH_NOSNAP)
                        return -ENOENT;

                spec->snap_id = snap_id;
        } else {
                spec->snap_id = CEPH_NOSNAP;
        }

        return 0;
}
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_spec *spec = rbd_dev->spec;
        const char *pool_name;
        const char *image_name;
        const char *snap_name;
        int ret;

        rbd_assert(spec->pool_id != CEPH_NOPOOL);
        rbd_assert(spec->image_id);
        rbd_assert(spec->snap_id != CEPH_NOSNAP);

        /* Get the pool name; we have to make our own copy of this */

        pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
        if (!pool_name) {
                rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
                return -EIO;
        }
        pool_name = kstrdup(pool_name, GFP_KERNEL);
        if (!pool_name)
                return -ENOMEM;

        /* Fetch the image name; tolerate failure here */

        image_name = rbd_dev_image_name(rbd_dev);
        if (!image_name)
                rbd_warn(rbd_dev, "unable to get image name");

        /* Fetch the snapshot name */

        snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
        if (IS_ERR(snap_name)) {
                ret = PTR_ERR(snap_name);
                goto out_err;
        }

        spec->pool_name = pool_name;
        spec->image_name = image_name;
        spec->snap_name = snap_name;

        return 0;

out_err:
        kfree(image_name);
        kfree(pool_name);
        return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
        size_t size;
        int ret;
        void *reply_buf;
        void *p;
        void *end;
        u64 seq;
        u32 snap_count;
        struct ceph_snap_context *snapc;
        u32 i;

        /*
         * We'll need room for the seq value (maximum snapshot id),
         * snapshot count, and array of that many snapshot ids.
         * For now we have a fixed upper limit on the number we're
         * prepared to receive.
         */
        size = sizeof (__le64) + sizeof (__le32) +
                        RBD_MAX_SNAP_COUNT * sizeof (__le64);
        reply_buf = kzalloc(size, GFP_KERNEL);
        if (!reply_buf)
                return -ENOMEM;

        ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
                                  &rbd_dev->header_oloc, "get_snapcontext",
                                  NULL, 0, reply_buf, size);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out;

        p = reply_buf;
        end = reply_buf + ret;
        ret = -ERANGE;
        ceph_decode_64_safe(&p, end, seq, out);
        ceph_decode_32_safe(&p, end, snap_count, out);

        /*
         * Make sure the reported number of snapshot ids wouldn't go
         * beyond the end of our buffer.  But before checking that,
         * make sure the computed size of the snapshot context we
         * allocate is representable in a size_t.
         */
        if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
                                / sizeof (u64))
                goto out;
        if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
                goto out;
        ret = 0;

        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc) {
                ret = -ENOMEM;
                goto out;
        }
        snapc->seq = seq;
        for (i = 0; i < snap_count; i++)
                snapc->snaps[i] = ceph_decode_64(&p);

        ceph_put_snap_context(rbd_dev->header.snapc);
        rbd_dev->header.snapc = snapc;

        dout("  snap context seq = %llu, snap_count = %u\n",
                (unsigned long long)seq, (unsigned int)snap_count);
out:
        kfree(reply_buf);

        return ret;
}
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        size_t size;
        void *reply_buf;
        __le64 snapid;
        int ret;
        void *p;
        void *end;
        char *snap_name;

        size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
        reply_buf = kmalloc(size, GFP_KERNEL);
        if (!reply_buf)
                return ERR_PTR(-ENOMEM);

        snapid = cpu_to_le64(snap_id);
        ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
                                  &rbd_dev->header_oloc, "get_snapshot_name",
                                  &snapid, sizeof(snapid), reply_buf, size);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0) {
                snap_name = ERR_PTR(ret);
                goto out;
        }

        p = reply_buf;
        end = reply_buf + ret;
        snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
        if (IS_ERR(snap_name))
                goto out;

        dout("  snap_id 0x%016llx snap_name = %s\n",
                (unsigned long long)snap_id, snap_name);
out:
        kfree(reply_buf);

        return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
        bool first_time = rbd_dev->header.object_prefix == NULL;
        int ret;

        ret = rbd_dev_v2_image_size(rbd_dev);
        if (ret)
                return ret;

        if (first_time) {
                ret = rbd_dev_v2_header_onetime(rbd_dev);
                if (ret)
                        return ret;
        }

        ret = rbd_dev_v2_snap_context(rbd_dev);
        if (ret && first_time) {
                kfree(rbd_dev->header.object_prefix);
                rbd_dev->header.object_prefix = NULL;
        }

        return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_header_info(rbd_dev);

        return rbd_dev_v2_header_info(rbd_dev);
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
        /*
         * These are the characters that produce nonzero for
         * isspace() in the "C" and "POSIX" locales.
         */
        const char *spaces = " \f\n\r\t\v";

        *buf += strspn(*buf, spaces);   /* Find start of token */

        return strcspn(*buf, spaces);   /* Return token length */
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
        char *dup;
        size_t len;

        len = next_token(buf);
        dup = kmemdup(*buf, len + 1, GFP_KERNEL);
        if (!dup)
                return NULL;
        *(dup + len) = '\0';
        *buf += len;

        if (lenp)
                *lenp = len;

        return dup;
}
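/*
 * Usage sketch for the two helpers above (hypothetical input): given
 * buf = "rbd foo -", next_token(&buf) returns 3 and leaves buf
 * unchanged, while dup_token(&buf, &len) returns a freshly allocated
 * "rbd" (len = 3) and advances buf past the token; the following call
 * skips the intervening space and picks up "foo".
 */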
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * storage:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  rbd_spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_name>
 *      An optional snapshot name.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot name is
 *      provided.  Snapshot mappings are always read-only.
 */
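/*
 * Example (hypothetical monitor address, credentials and names):
 * mapping image "foo" from pool "rbd" at its head would be requested
 * with something like
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo -" \
 *         > /sys/bus/rbd/add
 *
 * The trailing "-" (RBD_SNAP_HEAD_NAME) may be omitted, since the
 * snapshot name defaults to the image head.
 */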
static int rbd_add_parse_args(const char *buf,
                                struct ceph_options **ceph_opts,
                                struct rbd_options **opts,
                                struct rbd_spec **rbd_spec)
{
        size_t len;
        char *options;
        const char *mon_addrs;
        char *snap_name;
        size_t mon_addrs_size;
        struct parse_rbd_opts_ctx pctx = { 0 };
        struct ceph_options *copts;
        int ret;

        /* The first four tokens are required */

        len = next_token(&buf);
        if (!len) {
                rbd_warn(NULL, "no monitor address(es) provided");
                return -EINVAL;
        }
        mon_addrs = buf;
        mon_addrs_size = len + 1;
        buf += len;

        ret = -EINVAL;
        options = dup_token(&buf, NULL);
        if (!options)
                return -ENOMEM;
        if (!*options) {
                rbd_warn(NULL, "no options provided");
                goto out_err;
        }

        pctx.spec = rbd_spec_alloc();
        if (!pctx.spec)
                goto out_mem;

        pctx.spec->pool_name = dup_token(&buf, NULL);
        if (!pctx.spec->pool_name)
                goto out_mem;
        if (!*pctx.spec->pool_name) {
                rbd_warn(NULL, "no pool name provided");
                goto out_err;
        }

        pctx.spec->image_name = dup_token(&buf, NULL);
        if (!pctx.spec->image_name)
                goto out_mem;
        if (!*pctx.spec->image_name) {
                rbd_warn(NULL, "no image name provided");
                goto out_err;
        }

        /*
         * Snapshot name is optional; default is to use "-"
         * (indicating the head/no snapshot).
         */
        len = next_token(&buf);
        if (!len) {
                buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
                len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
        } else if (len > RBD_MAX_SNAP_NAME_LEN) {
                ret = -ENAMETOOLONG;
                goto out_err;
        }
        snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
        if (!snap_name)
                goto out_mem;
        *(snap_name + len) = '\0';
        pctx.spec->snap_name = snap_name;

        /* Initialize all rbd options to the defaults */

        pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
        if (!pctx.opts)
                goto out_mem;

        pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
        pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
        pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
        pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
        pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
        pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
        pctx.opts->trim = RBD_TRIM_DEFAULT;

        copts = ceph_parse_options(options, mon_addrs,
                                   mon_addrs + mon_addrs_size - 1,
                                   parse_rbd_opts_token, &pctx);
        if (IS_ERR(copts)) {
                ret = PTR_ERR(copts);
                goto out_err;
        }
        kfree(options);

        *ceph_opts = copts;
        *opts = pctx.opts;
        *rbd_spec = pctx.spec;

        return 0;
out_mem:
        ret = -ENOMEM;
out_err:
        kfree(pctx.opts);
        rbd_spec_put(pctx.spec);
        kfree(options);

        return ret;
}
static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
{
        down_write(&rbd_dev->lock_rwsem);
        if (__rbd_is_lock_owner(rbd_dev))
                rbd_unlock(rbd_dev);
        up_write(&rbd_dev->lock_rwsem);
}

static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
        int ret;

        if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
                rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
                return -EINVAL;
        }

        /* FIXME: "rbd map --exclusive" should be interruptible */
        down_read(&rbd_dev->lock_rwsem);
        ret = rbd_wait_state_locked(rbd_dev, true);
        up_read(&rbd_dev->lock_rwsem);
        if (ret) {
                rbd_warn(rbd_dev, "failed to acquire exclusive lock");
                ret = -EROFS;
        }

        return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
        int ret;
        size_t size;
        CEPH_DEFINE_OID_ONSTACK(oid);
        void *response;
        char *image_id;

        /*
         * When probing a parent image, the image id is already
         * known (and the image name likely is not).  There's no
         * need to fetch the image id again in this case.  We
         * do still need to set the image format though.
         */
        if (rbd_dev->spec->image_id) {
                rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

                return 0;
        }

        /*
         * First, see if the format 2 image id file exists, and if
         * so, get the image's persistent id from it.
         */
        ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
                               rbd_dev->spec->image_name);
        if (ret)
                return ret;

        dout("rbd id object name is %s\n", oid.name);

        /* Response will be an encoded string, which includes a length */
        size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
        response = kzalloc(size, GFP_NOIO);
        if (!response) {
                ret = -ENOMEM;
                goto out;
        }

        /* If it doesn't exist we'll assume it's a format 1 image */
        ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
                                  "get_id", NULL, 0,
                                  response, RBD_IMAGE_ID_LEN_MAX);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret == -ENOENT) {
                image_id = kstrdup("", GFP_KERNEL);
                ret = image_id ? 0 : -ENOMEM;
                if (!ret)
                        rbd_dev->image_format = 1;
        } else if (ret >= 0) {
                void *p = response;

                image_id = ceph_extract_encoded_string(&p, p + ret,
                                                       NULL, GFP_NOIO);
                ret = PTR_ERR_OR_ZERO(image_id);
                if (!ret)
                        rbd_dev->image_format = 2;
        }

        if (!ret) {
                rbd_dev->spec->image_id = image_id;
                dout("image_id is %s\n", image_id);
        }
out:
        kfree(response);
        ceph_oid_destroy(&oid);
        return ret;
}
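/*
 * Naming sketch (values illustrative): with RBD_ID_PREFIX "rbd_id.",
 * probing a format 2 image named "foo" reads the "rbd_id.foo" object
 * via the "get_id" class method; a format 1 image has no such object,
 * which is why -ENOENT above selects format 1 with an empty image id.
 */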
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * calls.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
        struct rbd_image_header *header;

        rbd_dev_parent_put(rbd_dev);

        /* Free dynamic fields from the header, then zero it out */

        header = &rbd_dev->header;
        ceph_put_snap_context(header->snapc);
        kfree(header->snap_sizes);
        kfree(header->snap_names);
        kfree(header->object_prefix);
        memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
        int ret;

        ret = rbd_dev_v2_object_prefix(rbd_dev);
        if (ret)
                goto out_err;

        /*
         * Get and check the features for the image.  Currently the
         * features are assumed to never change.
         */
        ret = rbd_dev_v2_features(rbd_dev);
        if (ret)
                goto out_err;

        /* If the image supports fancy striping, get its parameters */

        if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
                ret = rbd_dev_v2_striping_info(rbd_dev);
                if (ret < 0)
                        goto out_err;
        }

        if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
                ret = rbd_dev_v2_data_pool(rbd_dev);
                if (ret)
                        goto out_err;
        }

        rbd_init_layout(rbd_dev);
        return 0;

out_err:
        rbd_dev->header.features = 0;
        kfree(rbd_dev->header.object_prefix);
        rbd_dev->header.object_prefix = NULL;
        return ret;
}
/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
        struct rbd_device *parent = NULL;
        int ret;

        if (!rbd_dev->parent_spec)
                return 0;

        if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
                pr_info("parent chain is too long (%d)\n", depth);
                ret = -EINVAL;
                goto out_err;
        }

        parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
        if (!parent) {
                ret = -ENOMEM;
                goto out_err;
        }

        /*
         * Images related by parent/child relationships always share
         * rbd_client and spec/parent_spec, so bump their refcounts.
         */
        __rbd_get_client(rbd_dev->rbd_client);
        rbd_spec_get(rbd_dev->parent_spec);

        ret = rbd_dev_image_probe(parent, depth);
        if (ret < 0)
                goto out_err;

        rbd_dev->parent = parent;
        atomic_set(&rbd_dev->parent_ref, 1);
        return 0;

out_err:
        rbd_dev_unparent(rbd_dev);
        rbd_dev_destroy(parent);
        return ret;
}
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        rbd_dev_mapping_clear(rbd_dev);
        rbd_free_disk(rbd_dev);
        if (!single_major)
                unregister_blkdev(rbd_dev->major, rbd_dev->name);
}
/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
        int ret;

        /* Record our major and minor device numbers. */

        if (!single_major) {
                ret = register_blkdev(0, rbd_dev->name);
                if (ret < 0)
                        goto err_out_unlock;

                rbd_dev->major = ret;
                rbd_dev->minor = 0;
        } else {
                rbd_dev->major = rbd_major;
                rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
        }

        /* Set up the blkdev mapping. */

        ret = rbd_init_disk(rbd_dev);
        if (ret)
                goto err_out_blkdev;

        ret = rbd_dev_mapping_set(rbd_dev);
        if (ret)
                goto err_out_disk;

        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);

        ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
        if (ret)
                goto err_out_mapping;

        set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        up_write(&rbd_dev->header_rwsem);
        return 0;

err_out_mapping:
        rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
        rbd_free_disk(rbd_dev);
err_out_blkdev:
        if (!single_major)
                unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
        up_write(&rbd_dev->header_rwsem);
        return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
        struct rbd_spec *spec = rbd_dev->spec;
        int ret;

        /* Record the header object name for this rbd image. */

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
                                       spec->image_name, RBD_SUFFIX);
        else
                ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
                                       RBD_HEADER_PREFIX, spec->image_id);

        return ret;
}
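/*
 * For example (names illustrative): a format 1 image "foo" gets
 * header object "foo.rbd" (RBD_SUFFIX), while a format 2 image with
 * image id "1234abcd" gets header object "rbd_header.1234abcd"
 * (RBD_HEADER_PREFIX).
 */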
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
        rbd_dev_unprobe(rbd_dev);
        if (rbd_dev->opts)
                rbd_unregister_watch(rbd_dev);
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
        int ret;

        /*
         * Get the id from the image id object.  Unless there's an
         * error, rbd_dev->spec->image_id will be filled in with
         * a dynamically-allocated string, and rbd_dev->image_format
         * will be set to either 1 or 2.
         */
        ret = rbd_dev_image_id(rbd_dev);
        if (ret)
                return ret;

        ret = rbd_dev_header_name(rbd_dev);
        if (ret)
                goto err_out_format;

        if (!depth) {
                ret = rbd_register_watch(rbd_dev);
                if (ret) {
                        if (ret == -ENOENT)
                                pr_info("image %s/%s%s%s does not exist\n",
                                        rbd_dev->spec->pool_name,
                                        rbd_dev->spec->pool_ns ?: "",
                                        rbd_dev->spec->pool_ns ? "/" : "",
                                        rbd_dev->spec->image_name);
                        goto err_out_format;
                }
        }

        ret = rbd_dev_header_info(rbd_dev);
        if (ret)
                goto err_out_watch;

        /*
         * If this image is the one being mapped, we have pool name and
         * id, image name and id, and snap name - need to fill snap id.
         * Otherwise this is a parent image, identified by pool, image
         * and snap ids - need to fill in names for those ids.
         */
        if (!depth)
                ret = rbd_spec_fill_snap_id(rbd_dev);
        else
                ret = rbd_spec_fill_names(rbd_dev);
        if (ret) {
                if (ret == -ENOENT)
                        pr_info("snap %s/%s%s%s@%s does not exist\n",
                                rbd_dev->spec->pool_name,
                                rbd_dev->spec->pool_ns ?: "",
                                rbd_dev->spec->pool_ns ? "/" : "",
                                rbd_dev->spec->image_name,
                                rbd_dev->spec->snap_name);
                goto err_out_probe;
        }

        if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
                ret = rbd_dev_v2_parent_info(rbd_dev);
                if (ret)
                        goto err_out_probe;
        }

        ret = rbd_dev_probe_parent(rbd_dev, depth);
        if (ret)
                goto err_out_probe;

        dout("discovered format %u image, header name is %s\n",
                rbd_dev->image_format, rbd_dev->header_oid.name);
        return 0;

err_out_probe:
        rbd_dev_unprobe(rbd_dev);
err_out_watch:
        if (!depth)
                rbd_unregister_watch(rbd_dev);
err_out_format:
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;
        return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
                          const char *buf,
                          size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct ceph_options *ceph_opts = NULL;
        struct rbd_options *rbd_opts = NULL;
        struct rbd_spec *spec = NULL;
        struct rbd_client *rbdc;
        int rc;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        /* parse add command */
        rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
        if (rc < 0)
                goto out;

        rbdc = rbd_get_client(ceph_opts);
        if (IS_ERR(rbdc)) {
                rc = PTR_ERR(rbdc);
                goto err_out_args;
        }

        /* pick the pool */
        rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
        if (rc < 0) {
                if (rc == -ENOENT)
                        pr_info("pool %s does not exist\n", spec->pool_name);
                goto err_out_client;
        }
        spec->pool_id = (u64)rc;

        rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
        if (!rbd_dev) {
                rc = -ENOMEM;
                goto err_out_client;
        }
        rbdc = NULL;            /* rbd_dev now owns this */
        spec = NULL;            /* rbd_dev now owns this */
        rbd_opts = NULL;        /* rbd_dev now owns this */

        rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
        if (!rbd_dev->config_info) {
                rc = -ENOMEM;
                goto err_out_rbd_dev;
        }

        down_write(&rbd_dev->header_rwsem);
        rc = rbd_dev_image_probe(rbd_dev, 0);
        if (rc < 0) {
                up_write(&rbd_dev->header_rwsem);
                goto err_out_rbd_dev;
        }

        /* If we are mapping a snapshot it must be marked read-only */
        if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
                rbd_dev->opts->read_only = true;

        if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
                rbd_warn(rbd_dev, "alloc_size adjusted to %u",
                         rbd_dev->layout.object_size);
                rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
        }

        rc = rbd_dev_device_setup(rbd_dev);
        if (rc)
                goto err_out_image_probe;

        if (rbd_dev->opts->exclusive) {
                rc = rbd_add_acquire_lock(rbd_dev);
                if (rc)
                        goto err_out_device_setup;
        }

        /* Everything's ready.  Announce the disk to the world. */

        rc = device_add(&rbd_dev->dev);
        if (rc)
                goto err_out_image_lock;

        add_disk(rbd_dev->disk);
        /* see rbd_init_disk() */
        blk_put_queue(rbd_dev->disk->queue);

        spin_lock(&rbd_dev_list_lock);
        list_add_tail(&rbd_dev->node, &rbd_dev_list);
        spin_unlock(&rbd_dev_list_lock);

        pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
                (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
                rbd_dev->header.features);
        rc = count;
out:
        module_put(THIS_MODULE);
        return rc;

err_out_image_lock:
        rbd_dev_image_unlock(rbd_dev);
err_out_device_setup:
        rbd_dev_device_release(rbd_dev);
err_out_image_probe:
        rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
        rbd_dev_destroy(rbd_dev);
err_out_client:
        rbd_put_client(rbdc);
err_out_args:
        rbd_spec_put(spec);
        kfree(rbd_opts);
        goto out;
}
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
        if (single_major)
                return -EINVAL;

        return do_rbd_add(bus, buf, count);
}

static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
                                      size_t count)
{
        return do_rbd_add(bus, buf, count);
}
*rbd_dev
)
6146 while (rbd_dev
->parent
) {
6147 struct rbd_device
*first
= rbd_dev
;
6148 struct rbd_device
*second
= first
->parent
;
6149 struct rbd_device
*third
;
6152 * Follow to the parent with no grandparent and
6155 while (second
&& (third
= second
->parent
)) {
6160 rbd_dev_image_release(second
);
6161 rbd_dev_destroy(second
);
6162 first
->parent
= NULL
;
6163 first
->parent_overlap
= 0;
6165 rbd_assert(first
->parent_spec
);
6166 rbd_spec_put(first
->parent_spec
);
6167 first
->parent_spec
= NULL
;
static ssize_t do_rbd_remove(struct bus_type *bus,
                             const char *buf,
                             size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct list_head *tmp;
        int dev_id;
        char opt_buf[6];
        bool force = false;
        int ret;

        dev_id = -1;
        opt_buf[0] = '\0';
        sscanf(buf, "%d %5s", &dev_id, opt_buf);
        if (dev_id < 0) {
                pr_err("dev_id out of range\n");
                return -EINVAL;
        }
        if (opt_buf[0] != '\0') {
                if (!strcmp(opt_buf, "force")) {
                        force = true;
                } else {
                        pr_err("bad remove option at '%s'\n", opt_buf);
                        return -EINVAL;
                }
        }

        ret = -ENOENT;
        spin_lock(&rbd_dev_list_lock);
        list_for_each(tmp, &rbd_dev_list) {
                rbd_dev = list_entry(tmp, struct rbd_device, node);
                if (rbd_dev->dev_id == dev_id) {
                        ret = 0;
                        break;
                }
        }
        if (!ret) {
                spin_lock_irq(&rbd_dev->lock);
                if (rbd_dev->open_count && !force)
                        ret = -EBUSY;
                else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
                                          &rbd_dev->flags))
                        ret = -EINPROGRESS;
                spin_unlock_irq(&rbd_dev->lock);
        }
        spin_unlock(&rbd_dev_list_lock);
        if (ret)
                return ret;

        if (force) {
                /*
                 * Prevent new IO from being queued and wait for existing
                 * IO to complete/fail.
                 */
                blk_mq_freeze_queue(rbd_dev->disk->queue);
                blk_set_queue_dying(rbd_dev->disk->queue);
        }

        del_gendisk(rbd_dev->disk);
        spin_lock(&rbd_dev_list_lock);
        list_del_init(&rbd_dev->node);
        spin_unlock(&rbd_dev_list_lock);
        device_del(&rbd_dev->dev);

        rbd_dev_image_unlock(rbd_dev);
        rbd_dev_device_release(rbd_dev);
        rbd_dev_image_release(rbd_dev);
        rbd_dev_destroy(rbd_dev);
        return count;
}
static ssize_t remove_store(struct bus_type *bus, const char *buf,
                            size_t count)
{
        if (single_major)
                return -EINVAL;

        return do_rbd_remove(bus, buf, count);
}

static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
                                         size_t count)
{
        return do_rbd_remove(bus, buf, count);
}
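/*
 * Example (device id illustrative): unmapping device rbd0 is
 * requested with
 *
 *   $ echo "0" > /sys/bus/rbd/remove
 *
 * while "0 force" removes it even while still open, marking the
 * request queue dying so new and in-flight IO fail instead of
 * waiting for the last close.
 */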
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
        int ret;

        ret = device_register(&rbd_root_dev);
        if (ret < 0)
                return ret;

        ret = bus_register(&rbd_bus_type);
        if (ret < 0)
                device_unregister(&rbd_root_dev);

        return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
        bus_unregister(&rbd_bus_type);
        device_unregister(&rbd_root_dev);
}
static int __init rbd_slab_init(void)
{
        rbd_assert(!rbd_img_request_cache);
        rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
        if (!rbd_img_request_cache)
                return -ENOMEM;

        rbd_assert(!rbd_obj_request_cache);
        rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
        if (!rbd_obj_request_cache)
                goto out_err;

        return 0;

out_err:
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
        return -ENOMEM;
}

static void rbd_slab_exit(void)
{
        rbd_assert(rbd_obj_request_cache);
        kmem_cache_destroy(rbd_obj_request_cache);
        rbd_obj_request_cache = NULL;

        rbd_assert(rbd_img_request_cache);
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
        int rc;

        if (!libceph_compatible(NULL)) {
                rbd_warn(NULL, "libceph incompatibility (quitting)");
                return -EINVAL;
        }

        rc = rbd_slab_init();
        if (rc)
                return rc;

        /*
         * The number of active work items is limited by the number of
         * rbd devices * queue depth, so leave @max_active at default.
         */
        rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
        if (!rbd_wq) {
                rc = -ENOMEM;
                goto err_out_slab;
        }

        if (single_major) {
                rbd_major = register_blkdev(0, RBD_DRV_NAME);
                if (rbd_major < 0) {
                        rc = rbd_major;
                        goto err_out_wq;
                }
        }

        rc = rbd_sysfs_init();
        if (rc)
                goto err_out_blkdev;

        if (single_major)
                pr_info("loaded (major %d)\n", rbd_major);
        else
                pr_info("loaded\n");

        return 0;

err_out_blkdev:
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
        destroy_workqueue(rbd_wq);
err_out_slab:
        rbd_slab_exit();
        return rc;
}
static void __exit rbd_exit(void)
{
        ida_destroy(&rbd_dev_id_ida);
        rbd_sysfs_cleanup();
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
        destroy_workqueue(rbd_wq);
        rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
6377 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6378 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6379 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6380 /* following authorship retained from original osdblk.c */
6381 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6383 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6384 MODULE_LICENSE("GPL");