/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);
	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);
	return -EINVAL;
}
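
/*
 * Illustrative usage sketch (not part of the original driver): these
 * helpers implement a saturating reference count that refuses to
 * resurrect a zero counter and refuses to wrap past INT_MAX.  A caller
 * typically pairs them the way rbd pairs get/put on a parent reference:
 *
 *	if (atomic_inc_return_safe(&ref) > 0)
 *		use_the_parent();	// reference taken
 *	...
 *	if (atomic_dec_return_safe(&ref) < 0)
 *		pr_warn("reference underflow\n");
 *
 * use_the_parent() is a placeholder name, not a function in this file.
 */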
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX	/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
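
/*
 * Worked example (illustrative only): an image created with layering,
 * exclusive-lock and object-map has feature bits 0x1 | 0x4 | 0x8 = 0xd.
 * A client normally refuses to map an image whose features are not a
 * subset of what it supports, roughly:
 *
 *	u64 unsupported = features & ~RBD_FEATURES_SUPPORTED;
 *	if (unsupported)
 *		return -ENXIO;	// sketch of the usual rejection path
 */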
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;
enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)

enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *   RBD_OBJ_WRITE_GUARD -> RBD_OBJ_WRITE_READ_FROM_PARENT ->
 *   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC -> RBD_OBJ_WRITE_COPYUP_OPS -> done,
 *   with shortcuts straight to done when the image is flattened, when
 *   deep-copyup is not needed, or when copyup is not needed at all.
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};
struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};
387 int dev_id
; /* blkdev unique id */
389 int major
; /* blkdev assigned major */
391 struct gendisk
*disk
; /* blkdev's gendisk and rq */
393 u32 image_format
; /* Either 1 or 2 */
394 struct rbd_client
*rbd_client
;
396 char name
[DEV_NAME_LEN
]; /* blkdev name, e.g. rbd3 */
398 spinlock_t lock
; /* queue, flags, open_count */
400 struct rbd_image_header header
;
401 unsigned long flags
; /* possibly lock protected */
402 struct rbd_spec
*spec
;
403 struct rbd_options
*opts
;
404 char *config_info
; /* add{,_single_major} string */
406 struct ceph_object_id header_oid
;
407 struct ceph_object_locator header_oloc
;
409 struct ceph_file_layout layout
; /* used for all rbd requests */
411 struct mutex watch_mutex
;
412 enum rbd_watch_state watch_state
;
413 struct ceph_osd_linger_request
*watch_handle
;
415 struct delayed_work watch_dwork
;
417 struct rw_semaphore lock_rwsem
;
418 enum rbd_lock_state lock_state
;
419 char lock_cookie
[32];
420 struct rbd_client_id owner_cid
;
421 struct work_struct acquired_lock_work
;
422 struct work_struct released_lock_work
;
423 struct delayed_work lock_dwork
;
424 struct work_struct unlock_work
;
425 spinlock_t lock_lists_lock
;
426 struct list_head acquiring_list
;
427 struct list_head running_list
;
428 struct completion acquire_wait
;
430 struct completion releasing_wait
;
432 spinlock_t object_map_lock
;
434 u64 object_map_size
; /* in objects */
435 u64 object_map_flags
;
437 struct workqueue_struct
*task_wq
;
439 struct rbd_spec
*parent_spec
;
442 struct rbd_device
*parent
;
444 /* Block layer tags. */
445 struct blk_mq_tag_set tag_set
;
447 /* protects updating the header */
448 struct rw_semaphore header_rwsem
;
450 struct rbd_mapping mapping
;
452 struct list_head node
;
456 unsigned long open_count
; /* protected by lock */
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
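
/*
 * Worked example (illustrative only): with RBD_SINGLE_MAJOR_PART_SHIFT
 * == 4, each device gets 16 minors, so dev_id 3 maps to minor 48 and
 * minors 48..63 cover /dev/rbd3 and its partitions; minor 50 maps back
 * to dev_id 3.
 */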
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);
static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name = "rbd",
	.release = rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we cannot figure out what to print */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
626 static void rbd_dev_remove_parent(struct rbd_device
*rbd_dev
);
628 static int rbd_dev_refresh(struct rbd_device
*rbd_dev
);
629 static int rbd_dev_v2_header_onetime(struct rbd_device
*rbd_dev
);
630 static int rbd_dev_header_info(struct rbd_device
*rbd_dev
);
631 static int rbd_dev_v2_parent_info(struct rbd_device
*rbd_dev
);
632 static const char *rbd_dev_v2_snap_name(struct rbd_device
*rbd_dev
,
634 static int _rbd_dev_v2_snap_size(struct rbd_device
*rbd_dev
, u64 snap_id
,
635 u8
*order
, u64
*snap_size
);
636 static int _rbd_dev_v2_snap_features(struct rbd_device
*rbd_dev
, u64 snap_id
,
638 static int rbd_dev_v2_get_flags(struct rbd_device
*rbd_dev
);
640 static void rbd_obj_handle_request(struct rbd_obj_request
*obj_req
, int result
);
641 static void rbd_img_handle_request(struct rbd_img_request
*img_req
, int result
);
644 * Return true if nothing else is pending.
646 static bool pending_result_dec(struct pending_result
*pending
, int *result
)
648 rbd_assert(pending
->num_pending
> 0);
650 if (*result
&& !pending
->result
)
651 pending
->result
= *result
;
652 if (--pending
->num_pending
)
655 *result
= pending
->result
;
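
/*
 * Usage sketch (illustrative only): a state machine that fans out N
 * child requests sets pending->num_pending = N and records the first
 * nonzero completion code; each completion then does
 *
 *	if (!pending_result_dec(&req->pending, &result))
 *		return;		// other children still in flight
 *	// last completion: continue with 'result'
 *
 * so only the final child to finish advances the parent state machine.
 */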
659 static int rbd_open(struct block_device
*bdev
, fmode_t mode
)
661 struct rbd_device
*rbd_dev
= bdev
->bd_disk
->private_data
;
662 bool removing
= false;
664 spin_lock_irq(&rbd_dev
->lock
);
665 if (test_bit(RBD_DEV_FLAG_REMOVING
, &rbd_dev
->flags
))
668 rbd_dev
->open_count
++;
669 spin_unlock_irq(&rbd_dev
->lock
);
673 (void) get_device(&rbd_dev
->dev
);
678 static void rbd_release(struct gendisk
*disk
, fmode_t mode
)
680 struct rbd_device
*rbd_dev
= disk
->private_data
;
681 unsigned long open_count_before
;
683 spin_lock_irq(&rbd_dev
->lock
);
684 open_count_before
= rbd_dev
->open_count
--;
685 spin_unlock_irq(&rbd_dev
->lock
);
686 rbd_assert(open_count_before
> 0);
688 put_device(&rbd_dev
->dev
);
691 static int rbd_ioctl_set_ro(struct rbd_device
*rbd_dev
, unsigned long arg
)
695 if (get_user(ro
, (int __user
*)arg
))
698 /* Snapshots can't be marked read-write */
699 if (rbd_dev
->spec
->snap_id
!= CEPH_NOSNAP
&& !ro
)
702 /* Let blkdev_roset() handle it */
706 static int rbd_ioctl(struct block_device
*bdev
, fmode_t mode
,
707 unsigned int cmd
, unsigned long arg
)
709 struct rbd_device
*rbd_dev
= bdev
->bd_disk
->private_data
;
714 ret
= rbd_ioctl_set_ro(rbd_dev
, arg
);
724 static int rbd_compat_ioctl(struct block_device
*bdev
, fmode_t mode
,
725 unsigned int cmd
, unsigned long arg
)
727 return rbd_ioctl(bdev
, mode
, cmd
, arg
);
729 #endif /* CONFIG_COMPAT */
731 static const struct block_device_operations rbd_bd_ops
= {
732 .owner
= THIS_MODULE
,
734 .release
= rbd_release
,
737 .compat_ioctl
= rbd_compat_ioctl
,
742 * Initialize an rbd client instance. Success or not, this function
743 * consumes ceph_opts. Caller holds client_mutex.
745 static struct rbd_client
*rbd_client_create(struct ceph_options
*ceph_opts
)
747 struct rbd_client
*rbdc
;
750 dout("%s:\n", __func__
);
751 rbdc
= kmalloc(sizeof(struct rbd_client
), GFP_KERNEL
);
755 kref_init(&rbdc
->kref
);
756 INIT_LIST_HEAD(&rbdc
->node
);
758 rbdc
->client
= ceph_create_client(ceph_opts
, rbdc
);
759 if (IS_ERR(rbdc
->client
))
761 ceph_opts
= NULL
; /* Now rbdc->client is responsible for ceph_opts */
763 ret
= ceph_open_session(rbdc
->client
);
767 spin_lock(&rbd_client_list_lock
);
768 list_add_tail(&rbdc
->node
, &rbd_client_list
);
769 spin_unlock(&rbd_client_list_lock
);
771 dout("%s: rbdc %p\n", __func__
, rbdc
);
775 ceph_destroy_client(rbdc
->client
);
780 ceph_destroy_options(ceph_opts
);
781 dout("%s: error %d\n", __func__
, ret
);
786 static struct rbd_client
*__rbd_get_client(struct rbd_client
*rbdc
)
788 kref_get(&rbdc
->kref
);
794 * Find a ceph client with specific addr and configuration. If
795 * found, bump its reference count.
797 static struct rbd_client
*rbd_client_find(struct ceph_options
*ceph_opts
)
799 struct rbd_client
*client_node
;
802 if (ceph_opts
->flags
& CEPH_OPT_NOSHARE
)
805 spin_lock(&rbd_client_list_lock
);
806 list_for_each_entry(client_node
, &rbd_client_list
, node
) {
807 if (!ceph_compare_options(ceph_opts
, client_node
->client
)) {
808 __rbd_get_client(client_node
);
814 spin_unlock(&rbd_client_list_lock
);
816 return found
? client_node
: NULL
;
820 * (Per device) rbd map options
830 /* string args above */
839 static match_table_t rbd_opts_tokens
= {
840 {Opt_queue_depth
, "queue_depth=%d"},
841 {Opt_alloc_size
, "alloc_size=%d"},
842 {Opt_lock_timeout
, "lock_timeout=%d"},
844 {Opt_pool_ns
, "_pool_ns=%s"},
845 /* string args above */
846 {Opt_read_only
, "read_only"},
847 {Opt_read_only
, "ro"}, /* Alternate spelling */
848 {Opt_read_write
, "read_write"},
849 {Opt_read_write
, "rw"}, /* Alternate spelling */
850 {Opt_lock_on_read
, "lock_on_read"},
851 {Opt_exclusive
, "exclusive"},
852 {Opt_notrim
, "notrim"},
859 unsigned long lock_timeout
;
866 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
867 #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
868 #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
869 #define RBD_READ_ONLY_DEFAULT false
870 #define RBD_LOCK_ON_READ_DEFAULT false
871 #define RBD_EXCLUSIVE_DEFAULT false
872 #define RBD_TRIM_DEFAULT true
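
/*
 * Illustrative example (not part of the original source): these options
 * come from the text written to the sysfs add file, e.g. an options
 * field roughly of the form
 *
 *	queue_depth=128,alloc_size=65536,lock_on_read,read_only
 *
 * would leave opts->queue_depth = 128, opts->alloc_size = 65536,
 * opts->lock_on_read = true and opts->read_only = true after
 * parse_rbd_opts_token() has seen each token.
 */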
874 struct parse_rbd_opts_ctx
{
875 struct rbd_spec
*spec
;
876 struct rbd_options
*opts
;
879 static int parse_rbd_opts_token(char *c
, void *private)
881 struct parse_rbd_opts_ctx
*pctx
= private;
882 substring_t argstr
[MAX_OPT_ARGS
];
883 int token
, intval
, ret
;
885 token
= match_token(c
, rbd_opts_tokens
, argstr
);
886 if (token
< Opt_last_int
) {
887 ret
= match_int(&argstr
[0], &intval
);
889 pr_err("bad option arg (not int) at '%s'\n", c
);
892 dout("got int token %d val %d\n", token
, intval
);
893 } else if (token
> Opt_last_int
&& token
< Opt_last_string
) {
894 dout("got string token %d val %s\n", token
, argstr
[0].from
);
896 dout("got token %d\n", token
);
900 case Opt_queue_depth
:
902 pr_err("queue_depth out of range\n");
905 pctx
->opts
->queue_depth
= intval
;
908 if (intval
< SECTOR_SIZE
) {
909 pr_err("alloc_size out of range\n");
912 if (!is_power_of_2(intval
)) {
913 pr_err("alloc_size must be a power of 2\n");
916 pctx
->opts
->alloc_size
= intval
;
918 case Opt_lock_timeout
:
919 /* 0 is "wait forever" (i.e. infinite timeout) */
920 if (intval
< 0 || intval
> INT_MAX
/ 1000) {
921 pr_err("lock_timeout out of range\n");
924 pctx
->opts
->lock_timeout
= msecs_to_jiffies(intval
* 1000);
927 kfree(pctx
->spec
->pool_ns
);
928 pctx
->spec
->pool_ns
= match_strdup(argstr
);
929 if (!pctx
->spec
->pool_ns
)
933 pctx
->opts
->read_only
= true;
936 pctx
->opts
->read_only
= false;
938 case Opt_lock_on_read
:
939 pctx
->opts
->lock_on_read
= true;
942 pctx
->opts
->exclusive
= true;
945 pctx
->opts
->trim
= false;
948 /* libceph prints "bad option" msg */
955 static char* obj_op_name(enum obj_operation_type op_type
)
972 * Destroy ceph client
974 * Caller must hold rbd_client_list_lock.
976 static void rbd_client_release(struct kref
*kref
)
978 struct rbd_client
*rbdc
= container_of(kref
, struct rbd_client
, kref
);
980 dout("%s: rbdc %p\n", __func__
, rbdc
);
981 spin_lock(&rbd_client_list_lock
);
982 list_del(&rbdc
->node
);
983 spin_unlock(&rbd_client_list_lock
);
985 ceph_destroy_client(rbdc
->client
);
990 * Drop reference to ceph client node. If it's not referenced anymore, release
993 static void rbd_put_client(struct rbd_client
*rbdc
)
996 kref_put(&rbdc
->kref
, rbd_client_release
);
1000 * Get a ceph client with specific addr and configuration, if one does
1001 * not exist create it. Either way, ceph_opts is consumed by this
1004 static struct rbd_client
*rbd_get_client(struct ceph_options
*ceph_opts
)
1006 struct rbd_client
*rbdc
;
1009 mutex_lock(&client_mutex
);
1010 rbdc
= rbd_client_find(ceph_opts
);
1012 ceph_destroy_options(ceph_opts
);
1015 * Using an existing client. Make sure ->pg_pools is up to
1016 * date before we look up the pool id in do_rbd_add().
1018 ret
= ceph_wait_for_latest_osdmap(rbdc
->client
,
1019 rbdc
->client
->options
->mount_timeout
);
1021 rbd_warn(NULL
, "failed to get latest osdmap: %d", ret
);
1022 rbd_put_client(rbdc
);
1023 rbdc
= ERR_PTR(ret
);
1026 rbdc
= rbd_client_create(ceph_opts
);
1028 mutex_unlock(&client_mutex
);
1033 static bool rbd_image_format_valid(u32 image_format
)
1035 return image_format
== 1 || image_format
== 2;
1038 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk
*ondisk
)
1043 /* The header has to start with the magic rbd header text */
1044 if (memcmp(&ondisk
->text
, RBD_HEADER_TEXT
, sizeof (RBD_HEADER_TEXT
)))
1047 /* The bio layer requires at least sector-sized I/O */
1049 if (ondisk
->options
.order
< SECTOR_SHIFT
)
1052 /* If we use u64 in a few spots we may be able to loosen this */
1054 if (ondisk
->options
.order
> 8 * sizeof (int) - 1)
1058 * The size of a snapshot header has to fit in a size_t, and
1059 * that limits the number of snapshots.
1061 snap_count
= le32_to_cpu(ondisk
->snap_count
);
1062 size
= SIZE_MAX
- sizeof (struct ceph_snap_context
);
1063 if (snap_count
> size
/ sizeof (__le64
))
1067 * Not only that, but the size of the entire the snapshot
1068 * header must also be representable in a size_t.
1070 size
-= snap_count
* sizeof (__le64
);
1071 if ((u64
) size
< le64_to_cpu(ondisk
->snap_names_len
))
1078 * returns the size of an object in the image
1080 static u32
rbd_obj_bytes(struct rbd_image_header
*header
)
1082 return 1U << header
->obj_order
;
1085 static void rbd_init_layout(struct rbd_device
*rbd_dev
)
1087 if (rbd_dev
->header
.stripe_unit
== 0 ||
1088 rbd_dev
->header
.stripe_count
== 0) {
1089 rbd_dev
->header
.stripe_unit
= rbd_obj_bytes(&rbd_dev
->header
);
1090 rbd_dev
->header
.stripe_count
= 1;
1093 rbd_dev
->layout
.stripe_unit
= rbd_dev
->header
.stripe_unit
;
1094 rbd_dev
->layout
.stripe_count
= rbd_dev
->header
.stripe_count
;
1095 rbd_dev
->layout
.object_size
= rbd_obj_bytes(&rbd_dev
->header
);
1096 rbd_dev
->layout
.pool_id
= rbd_dev
->header
.data_pool_id
== CEPH_NOPOOL
?
1097 rbd_dev
->spec
->pool_id
: rbd_dev
->header
.data_pool_id
;
1098 RCU_INIT_POINTER(rbd_dev
->layout
.pool_ns
, NULL
);
1102 * Fill an rbd image header with information from the given format 1
1105 static int rbd_header_from_disk(struct rbd_device
*rbd_dev
,
1106 struct rbd_image_header_ondisk
*ondisk
)
1108 struct rbd_image_header
*header
= &rbd_dev
->header
;
1109 bool first_time
= header
->object_prefix
== NULL
;
1110 struct ceph_snap_context
*snapc
;
1111 char *object_prefix
= NULL
;
1112 char *snap_names
= NULL
;
1113 u64
*snap_sizes
= NULL
;
1118 /* Allocate this now to avoid having to handle failure below */
1121 object_prefix
= kstrndup(ondisk
->object_prefix
,
1122 sizeof(ondisk
->object_prefix
),
1128 /* Allocate the snapshot context and fill it in */
1130 snap_count
= le32_to_cpu(ondisk
->snap_count
);
1131 snapc
= ceph_create_snap_context(snap_count
, GFP_KERNEL
);
1134 snapc
->seq
= le64_to_cpu(ondisk
->snap_seq
);
1136 struct rbd_image_snap_ondisk
*snaps
;
1137 u64 snap_names_len
= le64_to_cpu(ondisk
->snap_names_len
);
1139 /* We'll keep a copy of the snapshot names... */
1141 if (snap_names_len
> (u64
)SIZE_MAX
)
1143 snap_names
= kmalloc(snap_names_len
, GFP_KERNEL
);
1147 /* ...as well as the array of their sizes. */
1148 snap_sizes
= kmalloc_array(snap_count
,
1149 sizeof(*header
->snap_sizes
),
1155 * Copy the names, and fill in each snapshot's id
1158 * Note that rbd_dev_v1_header_info() guarantees the
1159 * ondisk buffer we're working with has
1160 * snap_names_len bytes beyond the end of the
1161 * snapshot id array, this memcpy() is safe.
1163 memcpy(snap_names
, &ondisk
->snaps
[snap_count
], snap_names_len
);
1164 snaps
= ondisk
->snaps
;
1165 for (i
= 0; i
< snap_count
; i
++) {
1166 snapc
->snaps
[i
] = le64_to_cpu(snaps
[i
].id
);
1167 snap_sizes
[i
] = le64_to_cpu(snaps
[i
].image_size
);
1171 /* We won't fail any more, fill in the header */
1174 header
->object_prefix
= object_prefix
;
1175 header
->obj_order
= ondisk
->options
.order
;
1176 rbd_init_layout(rbd_dev
);
1178 ceph_put_snap_context(header
->snapc
);
1179 kfree(header
->snap_names
);
1180 kfree(header
->snap_sizes
);
1183 /* The remaining fields always get updated (when we refresh) */
1185 header
->image_size
= le64_to_cpu(ondisk
->image_size
);
1186 header
->snapc
= snapc
;
1187 header
->snap_names
= snap_names
;
1188 header
->snap_sizes
= snap_sizes
;
1196 ceph_put_snap_context(snapc
);
1197 kfree(object_prefix
);
1202 static const char *_rbd_dev_v1_snap_name(struct rbd_device
*rbd_dev
, u32 which
)
1204 const char *snap_name
;
1206 rbd_assert(which
< rbd_dev
->header
.snapc
->num_snaps
);
1208 /* Skip over names until we find the one we are looking for */
1210 snap_name
= rbd_dev
->header
.snap_names
;
1212 snap_name
+= strlen(snap_name
) + 1;
1214 return kstrdup(snap_name
, GFP_KERNEL
);
1218 * Snapshot id comparison function for use with qsort()/bsearch().
1219 * Note that result is for snapshots in *descending* order.
1221 static int snapid_compare_reverse(const void *s1
, const void *s2
)
1223 u64 snap_id1
= *(u64
*)s1
;
1224 u64 snap_id2
= *(u64
*)s2
;
1226 if (snap_id1
< snap_id2
)
1228 return snap_id1
== snap_id2
? 0 : -1;
1232 * Search a snapshot context to see if the given snapshot id is
1235 * Returns the position of the snapshot id in the array if it's found,
1236 * or BAD_SNAP_INDEX otherwise.
1238 * Note: The snapshot array is in kept sorted (by the osd) in
1239 * reverse order, highest snapshot id first.
1241 static u32
rbd_dev_snap_index(struct rbd_device
*rbd_dev
, u64 snap_id
)
1243 struct ceph_snap_context
*snapc
= rbd_dev
->header
.snapc
;
1246 found
= bsearch(&snap_id
, &snapc
->snaps
, snapc
->num_snaps
,
1247 sizeof (snap_id
), snapid_compare_reverse
);
1249 return found
? (u32
)(found
- &snapc
->snaps
[0]) : BAD_SNAP_INDEX
;
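
/*
 * Worked example (illustrative only): with snapc->snaps holding the
 * descending array { 18, 12, 5 }, looking up snap_id 12 makes bsearch()
 * return &snaps[1] via snapid_compare_reverse(), so the function yields
 * index 1; looking up snap_id 7 finds nothing and yields BAD_SNAP_INDEX.
 */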
1252 static const char *rbd_dev_v1_snap_name(struct rbd_device
*rbd_dev
,
1256 const char *snap_name
;
1258 which
= rbd_dev_snap_index(rbd_dev
, snap_id
);
1259 if (which
== BAD_SNAP_INDEX
)
1260 return ERR_PTR(-ENOENT
);
1262 snap_name
= _rbd_dev_v1_snap_name(rbd_dev
, which
);
1263 return snap_name
? snap_name
: ERR_PTR(-ENOMEM
);
1266 static const char *rbd_snap_name(struct rbd_device
*rbd_dev
, u64 snap_id
)
1268 if (snap_id
== CEPH_NOSNAP
)
1269 return RBD_SNAP_HEAD_NAME
;
1271 rbd_assert(rbd_image_format_valid(rbd_dev
->image_format
));
1272 if (rbd_dev
->image_format
== 1)
1273 return rbd_dev_v1_snap_name(rbd_dev
, snap_id
);
1275 return rbd_dev_v2_snap_name(rbd_dev
, snap_id
);
1278 static int rbd_snap_size(struct rbd_device
*rbd_dev
, u64 snap_id
,
1281 rbd_assert(rbd_image_format_valid(rbd_dev
->image_format
));
1282 if (snap_id
== CEPH_NOSNAP
) {
1283 *snap_size
= rbd_dev
->header
.image_size
;
1284 } else if (rbd_dev
->image_format
== 1) {
1287 which
= rbd_dev_snap_index(rbd_dev
, snap_id
);
1288 if (which
== BAD_SNAP_INDEX
)
1291 *snap_size
= rbd_dev
->header
.snap_sizes
[which
];
1296 ret
= _rbd_dev_v2_snap_size(rbd_dev
, snap_id
, NULL
, &size
);
1305 static int rbd_snap_features(struct rbd_device
*rbd_dev
, u64 snap_id
,
1308 rbd_assert(rbd_image_format_valid(rbd_dev
->image_format
));
1309 if (snap_id
== CEPH_NOSNAP
) {
1310 *snap_features
= rbd_dev
->header
.features
;
1311 } else if (rbd_dev
->image_format
== 1) {
1312 *snap_features
= 0; /* No features for format 1 */
1317 ret
= _rbd_dev_v2_snap_features(rbd_dev
, snap_id
, &features
);
1321 *snap_features
= features
;
1326 static int rbd_dev_mapping_set(struct rbd_device
*rbd_dev
)
1328 u64 snap_id
= rbd_dev
->spec
->snap_id
;
1333 ret
= rbd_snap_size(rbd_dev
, snap_id
, &size
);
1336 ret
= rbd_snap_features(rbd_dev
, snap_id
, &features
);
1340 rbd_dev
->mapping
.size
= size
;
1341 rbd_dev
->mapping
.features
= features
;
1346 static void rbd_dev_mapping_clear(struct rbd_device
*rbd_dev
)
1348 rbd_dev
->mapping
.size
= 0;
1349 rbd_dev
->mapping
.features
= 0;
1352 static void zero_bvec(struct bio_vec
*bv
)
1355 unsigned long flags
;
1357 buf
= bvec_kmap_irq(bv
, &flags
);
1358 memset(buf
, 0, bv
->bv_len
);
1359 flush_dcache_page(bv
->bv_page
);
1360 bvec_kunmap_irq(buf
, &flags
);
1363 static void zero_bios(struct ceph_bio_iter
*bio_pos
, u32 off
, u32 bytes
)
1365 struct ceph_bio_iter it
= *bio_pos
;
1367 ceph_bio_iter_advance(&it
, off
);
1368 ceph_bio_iter_advance_step(&it
, bytes
, ({
1373 static void zero_bvecs(struct ceph_bvec_iter
*bvec_pos
, u32 off
, u32 bytes
)
1375 struct ceph_bvec_iter it
= *bvec_pos
;
1377 ceph_bvec_iter_advance(&it
, off
);
1378 ceph_bvec_iter_advance_step(&it
, bytes
, ({
1384 * Zero a range in @obj_req data buffer defined by a bio (list) or
1385 * (private) bio_vec array.
1387 * @off is relative to the start of the data buffer.
1389 static void rbd_obj_zero_range(struct rbd_obj_request
*obj_req
, u32 off
,
1392 dout("%s %p data buf %u~%u\n", __func__
, obj_req
, off
, bytes
);
1394 switch (obj_req
->img_request
->data_type
) {
1395 case OBJ_REQUEST_BIO
:
1396 zero_bios(&obj_req
->bio_pos
, off
, bytes
);
1398 case OBJ_REQUEST_BVECS
:
1399 case OBJ_REQUEST_OWN_BVECS
:
1400 zero_bvecs(&obj_req
->bvec_pos
, off
, bytes
);
1407 static void rbd_obj_request_destroy(struct kref
*kref
);
1408 static void rbd_obj_request_put(struct rbd_obj_request
*obj_request
)
1410 rbd_assert(obj_request
!= NULL
);
1411 dout("%s: obj %p (was %d)\n", __func__
, obj_request
,
1412 kref_read(&obj_request
->kref
));
1413 kref_put(&obj_request
->kref
, rbd_obj_request_destroy
);
1416 static void rbd_img_request_destroy(struct kref
*kref
);
1417 static void rbd_img_request_put(struct rbd_img_request
*img_request
)
1419 rbd_assert(img_request
!= NULL
);
1420 dout("%s: img %p (was %d)\n", __func__
, img_request
,
1421 kref_read(&img_request
->kref
));
1422 kref_put(&img_request
->kref
, rbd_img_request_destroy
);
1425 static inline void rbd_img_obj_request_add(struct rbd_img_request
*img_request
,
1426 struct rbd_obj_request
*obj_request
)
1428 rbd_assert(obj_request
->img_request
== NULL
);
1430 /* Image request now owns object's original reference */
1431 obj_request
->img_request
= img_request
;
1432 dout("%s: img %p obj %p\n", __func__
, img_request
, obj_request
);
1435 static inline void rbd_img_obj_request_del(struct rbd_img_request
*img_request
,
1436 struct rbd_obj_request
*obj_request
)
1438 dout("%s: img %p obj %p\n", __func__
, img_request
, obj_request
);
1439 list_del(&obj_request
->ex
.oe_item
);
1440 rbd_assert(obj_request
->img_request
== img_request
);
1441 rbd_obj_request_put(obj_request
);
1444 static void rbd_osd_submit(struct ceph_osd_request
*osd_req
)
1446 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
1448 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1449 __func__
, osd_req
, obj_req
, obj_req
->ex
.oe_objno
,
1450 obj_req
->ex
.oe_off
, obj_req
->ex
.oe_len
);
1451 ceph_osdc_start_request(osd_req
->r_osdc
, osd_req
, false);
1455 * The default/initial value for all image request flags is 0. Each
1456 * is conditionally set to 1 at image request initialization time
1457 * and currently never change thereafter.
1459 static void img_request_layered_set(struct rbd_img_request
*img_request
)
1461 set_bit(IMG_REQ_LAYERED
, &img_request
->flags
);
1465 static void img_request_layered_clear(struct rbd_img_request
*img_request
)
1467 clear_bit(IMG_REQ_LAYERED
, &img_request
->flags
);
1471 static bool img_request_layered_test(struct rbd_img_request
*img_request
)
1474 return test_bit(IMG_REQ_LAYERED
, &img_request
->flags
) != 0;
1477 static bool rbd_obj_is_entire(struct rbd_obj_request
*obj_req
)
1479 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
1481 return !obj_req
->ex
.oe_off
&&
1482 obj_req
->ex
.oe_len
== rbd_dev
->layout
.object_size
;
1485 static bool rbd_obj_is_tail(struct rbd_obj_request
*obj_req
)
1487 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
1489 return obj_req
->ex
.oe_off
+ obj_req
->ex
.oe_len
==
1490 rbd_dev
->layout
.object_size
;
1494 * Must be called after rbd_obj_calc_img_extents().
1496 static bool rbd_obj_copyup_enabled(struct rbd_obj_request
*obj_req
)
1498 if (!obj_req
->num_img_extents
||
1499 (rbd_obj_is_entire(obj_req
) &&
1500 !obj_req
->img_request
->snapc
->num_snaps
))
1506 static u64
rbd_obj_img_extents_bytes(struct rbd_obj_request
*obj_req
)
1508 return ceph_file_extents_bytes(obj_req
->img_extents
,
1509 obj_req
->num_img_extents
);
1512 static bool rbd_img_is_write(struct rbd_img_request
*img_req
)
1514 switch (img_req
->op_type
) {
1518 case OBJ_OP_DISCARD
:
1519 case OBJ_OP_ZEROOUT
:
1526 static void rbd_osd_req_callback(struct ceph_osd_request
*osd_req
)
1528 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
1531 dout("%s osd_req %p result %d for obj_req %p\n", __func__
, osd_req
,
1532 osd_req
->r_result
, obj_req
);
1535 * Writes aren't allowed to return a data payload. In some
1536 * guarded write cases (e.g. stat + zero on an empty object)
1537 * a stat response makes it through, but we don't care.
1539 if (osd_req
->r_result
> 0 && rbd_img_is_write(obj_req
->img_request
))
1542 result
= osd_req
->r_result
;
1544 rbd_obj_handle_request(obj_req
, result
);
1547 static void rbd_osd_format_read(struct ceph_osd_request
*osd_req
)
1549 struct rbd_obj_request
*obj_request
= osd_req
->r_priv
;
1551 osd_req
->r_flags
= CEPH_OSD_FLAG_READ
;
1552 osd_req
->r_snapid
= obj_request
->img_request
->snap_id
;
1555 static void rbd_osd_format_write(struct ceph_osd_request
*osd_req
)
1557 struct rbd_obj_request
*obj_request
= osd_req
->r_priv
;
1559 osd_req
->r_flags
= CEPH_OSD_FLAG_WRITE
;
1560 ktime_get_real_ts64(&osd_req
->r_mtime
);
1561 osd_req
->r_data_offset
= obj_request
->ex
.oe_off
;
1564 static struct ceph_osd_request
*
1565 __rbd_obj_add_osd_request(struct rbd_obj_request
*obj_req
,
1566 struct ceph_snap_context
*snapc
, int num_ops
)
1568 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
1569 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1570 struct ceph_osd_request
*req
;
1571 const char *name_format
= rbd_dev
->image_format
== 1 ?
1572 RBD_V1_DATA_FORMAT
: RBD_V2_DATA_FORMAT
;
1575 req
= ceph_osdc_alloc_request(osdc
, snapc
, num_ops
, false, GFP_NOIO
);
1577 return ERR_PTR(-ENOMEM
);
1579 list_add_tail(&req
->r_private_item
, &obj_req
->osd_reqs
);
1580 req
->r_callback
= rbd_osd_req_callback
;
1581 req
->r_priv
= obj_req
;
1584 * Data objects may be stored in a separate pool, but always in
1585 * the same namespace in that pool as the header in its pool.
1587 ceph_oloc_copy(&req
->r_base_oloc
, &rbd_dev
->header_oloc
);
1588 req
->r_base_oloc
.pool
= rbd_dev
->layout
.pool_id
;
1590 ret
= ceph_oid_aprintf(&req
->r_base_oid
, GFP_NOIO
, name_format
,
1591 rbd_dev
->header
.object_prefix
,
1592 obj_req
->ex
.oe_objno
);
1594 return ERR_PTR(ret
);
1599 static struct ceph_osd_request
*
1600 rbd_obj_add_osd_request(struct rbd_obj_request
*obj_req
, int num_ops
)
1602 return __rbd_obj_add_osd_request(obj_req
, obj_req
->img_request
->snapc
,
1606 static struct rbd_obj_request
*rbd_obj_request_create(void)
1608 struct rbd_obj_request
*obj_request
;
1610 obj_request
= kmem_cache_zalloc(rbd_obj_request_cache
, GFP_NOIO
);
1614 ceph_object_extent_init(&obj_request
->ex
);
1615 INIT_LIST_HEAD(&obj_request
->osd_reqs
);
1616 mutex_init(&obj_request
->state_mutex
);
1617 kref_init(&obj_request
->kref
);
1619 dout("%s %p\n", __func__
, obj_request
);
1623 static void rbd_obj_request_destroy(struct kref
*kref
)
1625 struct rbd_obj_request
*obj_request
;
1626 struct ceph_osd_request
*osd_req
;
1629 obj_request
= container_of(kref
, struct rbd_obj_request
, kref
);
1631 dout("%s: obj %p\n", __func__
, obj_request
);
1633 while (!list_empty(&obj_request
->osd_reqs
)) {
1634 osd_req
= list_first_entry(&obj_request
->osd_reqs
,
1635 struct ceph_osd_request
, r_private_item
);
1636 list_del_init(&osd_req
->r_private_item
);
1637 ceph_osdc_put_request(osd_req
);
1640 switch (obj_request
->img_request
->data_type
) {
1641 case OBJ_REQUEST_NODATA
:
1642 case OBJ_REQUEST_BIO
:
1643 case OBJ_REQUEST_BVECS
:
1644 break; /* Nothing to do */
1645 case OBJ_REQUEST_OWN_BVECS
:
1646 kfree(obj_request
->bvec_pos
.bvecs
);
1652 kfree(obj_request
->img_extents
);
1653 if (obj_request
->copyup_bvecs
) {
1654 for (i
= 0; i
< obj_request
->copyup_bvec_count
; i
++) {
1655 if (obj_request
->copyup_bvecs
[i
].bv_page
)
1656 __free_page(obj_request
->copyup_bvecs
[i
].bv_page
);
1658 kfree(obj_request
->copyup_bvecs
);
1661 kmem_cache_free(rbd_obj_request_cache
, obj_request
);
1664 /* It's OK to call this for a device with no parent */
1666 static void rbd_spec_put(struct rbd_spec
*spec
);
1667 static void rbd_dev_unparent(struct rbd_device
*rbd_dev
)
1669 rbd_dev_remove_parent(rbd_dev
);
1670 rbd_spec_put(rbd_dev
->parent_spec
);
1671 rbd_dev
->parent_spec
= NULL
;
1672 rbd_dev
->parent_overlap
= 0;
1676 * Parent image reference counting is used to determine when an
1677 * image's parent fields can be safely torn down--after there are no
1678 * more in-flight requests to the parent image. When the last
1679 * reference is dropped, cleaning them up is safe.
1681 static void rbd_dev_parent_put(struct rbd_device
*rbd_dev
)
1685 if (!rbd_dev
->parent_spec
)
1688 counter
= atomic_dec_return_safe(&rbd_dev
->parent_ref
);
1692 /* Last reference; clean up parent data structures */
1695 rbd_dev_unparent(rbd_dev
);
1697 rbd_warn(rbd_dev
, "parent reference underflow");
1701 * If an image has a non-zero parent overlap, get a reference to its
1704 * Returns true if the rbd device has a parent with a non-zero
1705 * overlap and a reference for it was successfully taken, or
1708 static bool rbd_dev_parent_get(struct rbd_device
*rbd_dev
)
1712 if (!rbd_dev
->parent_spec
)
1715 down_read(&rbd_dev
->header_rwsem
);
1716 if (rbd_dev
->parent_overlap
)
1717 counter
= atomic_inc_return_safe(&rbd_dev
->parent_ref
);
1718 up_read(&rbd_dev
->header_rwsem
);
1721 rbd_warn(rbd_dev
, "parent reference overflow");
1727 * Caller is responsible for filling in the list of object requests
1728 * that comprises the image request, and the Linux request pointer
1729 * (if there is one).
1731 static struct rbd_img_request
*rbd_img_request_create(
1732 struct rbd_device
*rbd_dev
,
1733 enum obj_operation_type op_type
,
1734 struct ceph_snap_context
*snapc
)
1736 struct rbd_img_request
*img_request
;
1738 img_request
= kmem_cache_zalloc(rbd_img_request_cache
, GFP_NOIO
);
1742 img_request
->rbd_dev
= rbd_dev
;
1743 img_request
->op_type
= op_type
;
1744 if (!rbd_img_is_write(img_request
))
1745 img_request
->snap_id
= rbd_dev
->spec
->snap_id
;
1747 img_request
->snapc
= snapc
;
1749 if (rbd_dev_parent_get(rbd_dev
))
1750 img_request_layered_set(img_request
);
1752 INIT_LIST_HEAD(&img_request
->lock_item
);
1753 INIT_LIST_HEAD(&img_request
->object_extents
);
1754 mutex_init(&img_request
->state_mutex
);
1755 kref_init(&img_request
->kref
);
1760 static void rbd_img_request_destroy(struct kref
*kref
)
1762 struct rbd_img_request
*img_request
;
1763 struct rbd_obj_request
*obj_request
;
1764 struct rbd_obj_request
*next_obj_request
;
1766 img_request
= container_of(kref
, struct rbd_img_request
, kref
);
1768 dout("%s: img %p\n", __func__
, img_request
);
1770 WARN_ON(!list_empty(&img_request
->lock_item
));
1771 for_each_obj_request_safe(img_request
, obj_request
, next_obj_request
)
1772 rbd_img_obj_request_del(img_request
, obj_request
);
1774 if (img_request_layered_test(img_request
)) {
1775 img_request_layered_clear(img_request
);
1776 rbd_dev_parent_put(img_request
->rbd_dev
);
1779 if (rbd_img_is_write(img_request
))
1780 ceph_put_snap_context(img_request
->snapc
);
1782 kmem_cache_free(rbd_img_request_cache
, img_request
);
1785 #define BITS_PER_OBJ 2
1786 #define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ)
1787 #define OBJ_MASK ((1 << BITS_PER_OBJ) - 1)
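
/*
 * Worked example (illustrative only): with BITS_PER_OBJ == 2 the object
 * map packs 4 object states per byte, most significant bits first.  For
 * objno 5, __rbd_object_map_index() computes index = 5 / 4 = 1 and
 * shift = (4 - 1 - 1) * 2 = 4, so the state lives in bits 5:4 of
 * object_map[1] and is read back with (object_map[1] >> 4) & OBJ_MASK.
 */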
1789 static void __rbd_object_map_index(struct rbd_device
*rbd_dev
, u64 objno
,
1790 u64
*index
, u8
*shift
)
1794 rbd_assert(objno
< rbd_dev
->object_map_size
);
1795 *index
= div_u64_rem(objno
, OBJS_PER_BYTE
, &off
);
1796 *shift
= (OBJS_PER_BYTE
- off
- 1) * BITS_PER_OBJ
;
1799 static u8
__rbd_object_map_get(struct rbd_device
*rbd_dev
, u64 objno
)
1804 lockdep_assert_held(&rbd_dev
->object_map_lock
);
1805 __rbd_object_map_index(rbd_dev
, objno
, &index
, &shift
);
1806 return (rbd_dev
->object_map
[index
] >> shift
) & OBJ_MASK
;
1809 static void __rbd_object_map_set(struct rbd_device
*rbd_dev
, u64 objno
, u8 val
)
1815 lockdep_assert_held(&rbd_dev
->object_map_lock
);
1816 rbd_assert(!(val
& ~OBJ_MASK
));
1818 __rbd_object_map_index(rbd_dev
, objno
, &index
, &shift
);
1819 p
= &rbd_dev
->object_map
[index
];
1820 *p
= (*p
& ~(OBJ_MASK
<< shift
)) | (val
<< shift
);
1823 static u8
rbd_object_map_get(struct rbd_device
*rbd_dev
, u64 objno
)
1827 spin_lock(&rbd_dev
->object_map_lock
);
1828 state
= __rbd_object_map_get(rbd_dev
, objno
);
1829 spin_unlock(&rbd_dev
->object_map_lock
);
1833 static bool use_object_map(struct rbd_device
*rbd_dev
)
1835 return ((rbd_dev
->header
.features
& RBD_FEATURE_OBJECT_MAP
) &&
1836 !(rbd_dev
->object_map_flags
& RBD_FLAG_OBJECT_MAP_INVALID
));
1839 static bool rbd_object_map_may_exist(struct rbd_device
*rbd_dev
, u64 objno
)
1843 /* fall back to default logic if object map is disabled or invalid */
1844 if (!use_object_map(rbd_dev
))
1847 state
= rbd_object_map_get(rbd_dev
, objno
);
1848 return state
!= OBJECT_NONEXISTENT
;
1851 static void rbd_object_map_name(struct rbd_device
*rbd_dev
, u64 snap_id
,
1852 struct ceph_object_id
*oid
)
1854 if (snap_id
== CEPH_NOSNAP
)
1855 ceph_oid_printf(oid
, "%s%s", RBD_OBJECT_MAP_PREFIX
,
1856 rbd_dev
->spec
->image_id
);
1858 ceph_oid_printf(oid
, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX
,
1859 rbd_dev
->spec
->image_id
, snap_id
);
1862 static int rbd_object_map_lock(struct rbd_device
*rbd_dev
)
1864 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1865 CEPH_DEFINE_OID_ONSTACK(oid
);
1868 struct ceph_locker
*lockers
;
1870 bool broke_lock
= false;
1873 rbd_object_map_name(rbd_dev
, CEPH_NOSNAP
, &oid
);
1876 ret
= ceph_cls_lock(osdc
, &oid
, &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
1877 CEPH_CLS_LOCK_EXCLUSIVE
, "", "", "", 0);
1878 if (ret
!= -EBUSY
|| broke_lock
) {
1880 ret
= 0; /* already locked by myself */
1882 rbd_warn(rbd_dev
, "failed to lock object map: %d", ret
);
1886 ret
= ceph_cls_lock_info(osdc
, &oid
, &rbd_dev
->header_oloc
,
1887 RBD_LOCK_NAME
, &lock_type
, &lock_tag
,
1888 &lockers
, &num_lockers
);
1893 rbd_warn(rbd_dev
, "failed to get object map lockers: %d", ret
);
1898 if (num_lockers
== 0)
1901 rbd_warn(rbd_dev
, "breaking object map lock owned by %s%llu",
1902 ENTITY_NAME(lockers
[0].id
.name
));
1904 ret
= ceph_cls_break_lock(osdc
, &oid
, &rbd_dev
->header_oloc
,
1905 RBD_LOCK_NAME
, lockers
[0].id
.cookie
,
1906 &lockers
[0].id
.name
);
1907 ceph_free_lockers(lockers
, num_lockers
);
1912 rbd_warn(rbd_dev
, "failed to break object map lock: %d", ret
);
1920 static void rbd_object_map_unlock(struct rbd_device
*rbd_dev
)
1922 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1923 CEPH_DEFINE_OID_ONSTACK(oid
);
1926 rbd_object_map_name(rbd_dev
, CEPH_NOSNAP
, &oid
);
1928 ret
= ceph_cls_unlock(osdc
, &oid
, &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
1930 if (ret
&& ret
!= -ENOENT
)
1931 rbd_warn(rbd_dev
, "failed to unlock object map: %d", ret
);
1934 static int decode_object_map_header(void **p
, void *end
, u64
*object_map_size
)
1942 ceph_decode_32_safe(p
, end
, header_len
, e_inval
);
1943 header_end
= *p
+ header_len
;
1945 ret
= ceph_start_decoding(p
, end
, 1, "BitVector header", &struct_v
,
1950 ceph_decode_64_safe(p
, end
, *object_map_size
, e_inval
);
1959 static int __rbd_object_map_load(struct rbd_device
*rbd_dev
)
1961 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1962 CEPH_DEFINE_OID_ONSTACK(oid
);
1963 struct page
**pages
;
1967 u64 object_map_bytes
;
1968 u64 object_map_size
;
1972 rbd_assert(!rbd_dev
->object_map
&& !rbd_dev
->object_map_size
);
1974 num_objects
= ceph_get_num_objects(&rbd_dev
->layout
,
1975 rbd_dev
->mapping
.size
);
1976 object_map_bytes
= DIV_ROUND_UP_ULL(num_objects
* BITS_PER_OBJ
,
1978 num_pages
= calc_pages_for(0, object_map_bytes
) + 1;
1979 pages
= ceph_alloc_page_vector(num_pages
, GFP_KERNEL
);
1981 return PTR_ERR(pages
);
1983 reply_len
= num_pages
* PAGE_SIZE
;
1984 rbd_object_map_name(rbd_dev
, rbd_dev
->spec
->snap_id
, &oid
);
1985 ret
= ceph_osdc_call(osdc
, &oid
, &rbd_dev
->header_oloc
,
1986 "rbd", "object_map_load", CEPH_OSD_FLAG_READ
,
1987 NULL
, 0, pages
, &reply_len
);
1991 p
= page_address(pages
[0]);
1992 end
= p
+ min(reply_len
, (size_t)PAGE_SIZE
);
1993 ret
= decode_object_map_header(&p
, end
, &object_map_size
);
1997 if (object_map_size
!= num_objects
) {
1998 rbd_warn(rbd_dev
, "object map size mismatch: %llu vs %llu",
1999 object_map_size
, num_objects
);
2004 if (offset_in_page(p
) + object_map_bytes
> reply_len
) {
2009 rbd_dev
->object_map
= kvmalloc(object_map_bytes
, GFP_KERNEL
);
2010 if (!rbd_dev
->object_map
) {
2015 rbd_dev
->object_map_size
= object_map_size
;
2016 ceph_copy_from_page_vector(pages
, rbd_dev
->object_map
,
2017 offset_in_page(p
), object_map_bytes
);
2020 ceph_release_page_vector(pages
, num_pages
);
2024 static void rbd_object_map_free(struct rbd_device
*rbd_dev
)
2026 kvfree(rbd_dev
->object_map
);
2027 rbd_dev
->object_map
= NULL
;
2028 rbd_dev
->object_map_size
= 0;
2031 static int rbd_object_map_load(struct rbd_device
*rbd_dev
)
2035 ret
= __rbd_object_map_load(rbd_dev
);
2039 ret
= rbd_dev_v2_get_flags(rbd_dev
);
2041 rbd_object_map_free(rbd_dev
);
2045 if (rbd_dev
->object_map_flags
& RBD_FLAG_OBJECT_MAP_INVALID
)
2046 rbd_warn(rbd_dev
, "object map is invalid");
2051 static int rbd_object_map_open(struct rbd_device
*rbd_dev
)
2055 ret
= rbd_object_map_lock(rbd_dev
);
2059 ret
= rbd_object_map_load(rbd_dev
);
2061 rbd_object_map_unlock(rbd_dev
);
2068 static void rbd_object_map_close(struct rbd_device
*rbd_dev
)
2070 rbd_object_map_free(rbd_dev
);
2071 rbd_object_map_unlock(rbd_dev
);
2075 * This function needs snap_id (or more precisely just something to
2076 * distinguish between HEAD and snapshot object maps), new_state and
2077 * current_state that were passed to rbd_object_map_update().
2079 * To avoid allocating and stashing a context we piggyback on the OSD
2080 * request. A HEAD update has two ops (assert_locked). For new_state
2081 * and current_state we decode our own object_map_update op, encoded in
2082 * rbd_cls_object_map_update().
2084 static int rbd_object_map_update_finish(struct rbd_obj_request
*obj_req
,
2085 struct ceph_osd_request
*osd_req
)
2087 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
2088 struct ceph_osd_data
*osd_data
;
2090 u8 state
, new_state
, uninitialized_var(current_state
);
2091 bool has_current_state
;
2094 if (osd_req
->r_result
)
2095 return osd_req
->r_result
;
2098 * Nothing to do for a snapshot object map.
2100 if (osd_req
->r_num_ops
== 1)
2104 * Update in-memory HEAD object map.
2106 rbd_assert(osd_req
->r_num_ops
== 2);
2107 osd_data
= osd_req_op_data(osd_req
, 1, cls
, request_data
);
2108 rbd_assert(osd_data
->type
== CEPH_OSD_DATA_TYPE_PAGES
);
2110 p
= page_address(osd_data
->pages
[0]);
2111 objno
= ceph_decode_64(&p
);
2112 rbd_assert(objno
== obj_req
->ex
.oe_objno
);
2113 rbd_assert(ceph_decode_64(&p
) == objno
+ 1);
2114 new_state
= ceph_decode_8(&p
);
2115 has_current_state
= ceph_decode_8(&p
);
2116 if (has_current_state
)
2117 current_state
= ceph_decode_8(&p
);
2119 spin_lock(&rbd_dev
->object_map_lock
);
2120 state
= __rbd_object_map_get(rbd_dev
, objno
);
2121 if (!has_current_state
|| current_state
== state
||
2122 (current_state
== OBJECT_EXISTS
&& state
== OBJECT_EXISTS_CLEAN
))
2123 __rbd_object_map_set(rbd_dev
, objno
, new_state
);
2124 spin_unlock(&rbd_dev
->object_map_lock
);
2129 static void rbd_object_map_callback(struct ceph_osd_request
*osd_req
)
2131 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
2134 dout("%s osd_req %p result %d for obj_req %p\n", __func__
, osd_req
,
2135 osd_req
->r_result
, obj_req
);
2137 result
= rbd_object_map_update_finish(obj_req
, osd_req
);
2138 rbd_obj_handle_request(obj_req
, result
);
2141 static bool update_needed(struct rbd_device
*rbd_dev
, u64 objno
, u8 new_state
)
2143 u8 state
= rbd_object_map_get(rbd_dev
, objno
);
2145 if (state
== new_state
||
2146 (new_state
== OBJECT_PENDING
&& state
== OBJECT_NONEXISTENT
) ||
2147 (new_state
== OBJECT_NONEXISTENT
&& state
!= OBJECT_PENDING
))
2153 static int rbd_cls_object_map_update(struct ceph_osd_request
*req
,
2154 int which
, u64 objno
, u8 new_state
,
2155 const u8
*current_state
)
2157 struct page
**pages
;
2161 ret
= osd_req_op_cls_init(req
, which
, "rbd", "object_map_update");
2165 pages
= ceph_alloc_page_vector(1, GFP_NOIO
);
2167 return PTR_ERR(pages
);
2169 p
= start
= page_address(pages
[0]);
2170 ceph_encode_64(&p
, objno
);
2171 ceph_encode_64(&p
, objno
+ 1);
2172 ceph_encode_8(&p
, new_state
);
2173 if (current_state
) {
2174 ceph_encode_8(&p
, 1);
2175 ceph_encode_8(&p
, *current_state
);
2177 ceph_encode_8(&p
, 0);
2180 osd_req_op_cls_request_data_pages(req
, which
, pages
, p
- start
, 0,
2187 * 0 - object map update sent
2188 * 1 - object map update isn't needed
2191 static int rbd_object_map_update(struct rbd_obj_request
*obj_req
, u64 snap_id
,
2192 u8 new_state
, const u8
*current_state
)
2194 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
2195 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
2196 struct ceph_osd_request
*req
;
2201 if (snap_id
== CEPH_NOSNAP
) {
2202 if (!update_needed(rbd_dev
, obj_req
->ex
.oe_objno
, new_state
))
2205 num_ops
++; /* assert_locked */
2208 req
= ceph_osdc_alloc_request(osdc
, NULL
, num_ops
, false, GFP_NOIO
);
2212 list_add_tail(&req
->r_private_item
, &obj_req
->osd_reqs
);
2213 req
->r_callback
= rbd_object_map_callback
;
2214 req
->r_priv
= obj_req
;
2216 rbd_object_map_name(rbd_dev
, snap_id
, &req
->r_base_oid
);
2217 ceph_oloc_copy(&req
->r_base_oloc
, &rbd_dev
->header_oloc
);
2218 req
->r_flags
= CEPH_OSD_FLAG_WRITE
;
2219 ktime_get_real_ts64(&req
->r_mtime
);
2221 if (snap_id
== CEPH_NOSNAP
) {
2223 * Protect against possible race conditions during lock
2224 * ownership transitions.
2226 ret
= ceph_cls_assert_locked(req
, which
++, RBD_LOCK_NAME
,
2227 CEPH_CLS_LOCK_EXCLUSIVE
, "", "");
2232 ret
= rbd_cls_object_map_update(req
, which
, obj_req
->ex
.oe_objno
,
2233 new_state
, current_state
);
2237 ret
= ceph_osdc_alloc_messages(req
, GFP_NOIO
);
2241 ceph_osdc_start_request(osdc
, req
, false);
2245 static void prune_extents(struct ceph_file_extent
*img_extents
,
2246 u32
*num_img_extents
, u64 overlap
)
2248 u32 cnt
= *num_img_extents
;
2250 /* drop extents completely beyond the overlap */
2251 while (cnt
&& img_extents
[cnt
- 1].fe_off
>= overlap
)
2255 struct ceph_file_extent
*ex
= &img_extents
[cnt
- 1];
2257 /* trim final overlapping extent */
2258 if (ex
->fe_off
+ ex
->fe_len
> overlap
)
2259 ex
->fe_len
= overlap
- ex
->fe_off
;
2262 *num_img_extents
= cnt
;
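
/*
 * Worked example (illustrative only): with a parent overlap of 500
 * bytes, image extents { 0~100, 450~100, 600~50 } are pruned to
 * { 0~100, 450~50 } -- the last extent lies entirely beyond the overlap
 * and is dropped, and the middle one is trimmed at the overlap boundary.
 */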
2266 * Determine the byte range(s) covered by either just the object extent
2267 * or the entire object in the parent image.
2269 static int rbd_obj_calc_img_extents(struct rbd_obj_request
*obj_req
,
2272 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
2275 if (!rbd_dev
->parent_overlap
)
2278 ret
= ceph_extent_to_file(&rbd_dev
->layout
, obj_req
->ex
.oe_objno
,
2279 entire
? 0 : obj_req
->ex
.oe_off
,
2280 entire
? rbd_dev
->layout
.object_size
:
2282 &obj_req
->img_extents
,
2283 &obj_req
->num_img_extents
);
2287 prune_extents(obj_req
->img_extents
, &obj_req
->num_img_extents
,
2288 rbd_dev
->parent_overlap
);
2292 static void rbd_osd_setup_data(struct ceph_osd_request
*osd_req
, int which
)
2294 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
2296 switch (obj_req
->img_request
->data_type
) {
2297 case OBJ_REQUEST_BIO
:
2298 osd_req_op_extent_osd_data_bio(osd_req
, which
,
2300 obj_req
->ex
.oe_len
);
2302 case OBJ_REQUEST_BVECS
:
2303 case OBJ_REQUEST_OWN_BVECS
:
2304 rbd_assert(obj_req
->bvec_pos
.iter
.bi_size
==
2305 obj_req
->ex
.oe_len
);
2306 rbd_assert(obj_req
->bvec_idx
== obj_req
->bvec_count
);
2307 osd_req_op_extent_osd_data_bvec_pos(osd_req
, which
,
2308 &obj_req
->bvec_pos
);
2315 static int rbd_osd_setup_stat(struct ceph_osd_request
*osd_req
, int which
)
2317 struct page
**pages
;
2320 * The response data for a STAT call consists of:
2327 pages
= ceph_alloc_page_vector(1, GFP_NOIO
);
2329 return PTR_ERR(pages
);
2331 osd_req_op_init(osd_req
, which
, CEPH_OSD_OP_STAT
, 0);
2332 osd_req_op_raw_data_in_pages(osd_req
, which
, pages
,
2333 8 + sizeof(struct ceph_timespec
),
2338 static int rbd_osd_setup_copyup(struct ceph_osd_request
*osd_req
, int which
,
2341 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
2344 ret
= osd_req_op_cls_init(osd_req
, which
, "rbd", "copyup");
2348 osd_req_op_cls_request_data_bvecs(osd_req
, which
, obj_req
->copyup_bvecs
,
2349 obj_req
->copyup_bvec_count
, bytes
);
2353 static int rbd_obj_init_read(struct rbd_obj_request
*obj_req
)
2355 obj_req
->read_state
= RBD_OBJ_READ_START
;
static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				      int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	if (!use_object_map(rbd_dev) ||
	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
		osd_req_op_alloc_hint_init(osd_req, which++,
					   rbd_dev->layout.object_size,
					   rbd_dev->layout.object_size);
	}

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, which);
}
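
/*
 * Note: the allocation hint op above is added only when the object map
 * doesn't already mark the object as possibly existing, matching the extra
 * op accounted for in count_write_ops() below.
 */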
static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}
static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
	} else {
		osd_req_op_extent_init(osd_req, which,
				       truncate_or_zero_opcode(obj_req),
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
	}
}
static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off, next_off;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
				      rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;

		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		obj_req->ex.oe_off = off;
		obj_req->ex.oe_len = next_off - off;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
		obj_req->flags |= RBD_OBJ_FLAG_DELETION;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
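
/*
 * Worked example for the alloc_size alignment above (hypothetical values):
 * with alloc_size = 64K, a discard of off 4K, len 68K rounds to off = 64K
 * and next_off = 64K, so off >= next_off and the object request is dropped
 * as too small to free any space.
 */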
static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
				osd_req_op_init(osd_req, which++,
						CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
			osd_req_op_init(osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else {
		opcode = truncate_or_zero_opcode(obj_req);
	}

	if (opcode)
		osd_req_op_extent_init(osd_req, which, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
}
static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
	if (!obj_req->num_img_extents) {
		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
		if (rbd_obj_is_entire(obj_req))
			obj_req->flags |= RBD_OBJ_FLAG_DELETION;
	}

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
static int count_write_ops(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	switch (img_req->op_type) {
	case OBJ_OP_WRITE:
		if (!use_object_map(img_req->rbd_dev) ||
		    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
			return 2; /* setallochint + write/writefull */

		return 1; /* write/writefull */
	case OBJ_OP_DISCARD:
		return 1; /* delete/truncate/zero */
	case OBJ_OP_ZEROOUT:
		if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
		    !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
			return 2; /* create + truncate */

		return 1; /* delete/truncate/zero */
	default:
		BUG();
	}
}
static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				    int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->op_type) {
	case OBJ_OP_WRITE:
		__rbd_osd_setup_write_ops(osd_req, which);
		break;
	case OBJ_OP_DISCARD:
		__rbd_osd_setup_discard_ops(osd_req, which);
		break;
	case OBJ_OP_ZEROOUT:
		__rbd_osd_setup_zeroout_ops(osd_req, which);
		break;
	default:
		BUG();
	}
}
/*
 * Prune the list of object requests (adjust offset and/or length, drop
 * redundant requests).  Prepare object request state machines and image
 * request state machine for execution.
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req, *next_obj_req;
	int ret;

	for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_init_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_init_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_init_discard(obj_req);
			break;
		case OBJ_OP_ZEROOUT:
			ret = rbd_obj_init_zeroout(obj_req);
			break;
		default:
			BUG();
		}
		if (ret < 0)
			return ret;
		if (ret > 0) {
			rbd_img_obj_request_del(img_req, obj_req);
			continue;
		}
	}

	img_req->state = RBD_IMG_START;
	return 0;
}
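
/*
 * Return value convention for the rbd_obj_init_*() calls above: a negative
 * value is an error, a positive value means the object request is a no-op
 * and is deleted from the image request, and 0 keeps it for execution.
 */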
union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};
static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}
/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
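
/*
 * For example (hypothetical layout): stripe_unit = 1M with object_size = 4M
 * is "fancy" here even when stripe_count == 1, because a single object
 * extent may be visited once per 1M stripe unit by ceph_file_to_extents().
 */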
static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
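
/*
 * Summary of the fancy-layout path above: pass 1 (count_fn) sizes each
 * object request's private bvec array, pass 2 (copy_fn) fills it by
 * splitting the provided bio_vecs on stripe unit boundaries.
 */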
static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy = {};
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}
static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}
static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}
static void rbd_img_handle_request_work(struct work_struct *work)
{
	struct rbd_img_request *img_req =
	    container_of(work, struct rbd_img_request, work);

	rbd_img_handle_request(img_req, img_req->work_result);
}

static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
{
	INIT_WORK(&img_req->work, rbd_img_handle_request_work);
	img_req->work_result = result;
	queue_work(rbd_wq, &img_req->work);
}
static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
		return true;
	}

	dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
	     obj_req->ex.oe_objno);
	return false;
}
static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
{
	struct ceph_osd_request *osd_req;
	int ret;

	osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, 0);
	rbd_osd_format_read(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}
static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
	if (!child_img_req)
		return -ENOMEM;

	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
	     obj_req);

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			BUG();
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	/* avoid parent chain recursion */
	rbd_img_schedule(child_img_req, 0);
	return 0;
}
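
/*
 * The child image request created above runs against the parent device and
 * reports its result back through obj_req (see the IMG_REQ_CHILD handling
 * in rbd_img_handle_request()); scheduling it on the workqueue avoids
 * recursing down a long parent chain in the caller's context.
 */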
static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->read_state) {
	case RBD_OBJ_READ_START:
		rbd_assert(!*result);

		if (!rbd_obj_may_exist(obj_req)) {
			*result = -ENOENT;
			obj_req->read_state = RBD_OBJ_READ_OBJECT;
			goto again;
		}

		ret = rbd_obj_read_object(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		obj_req->read_state = RBD_OBJ_READ_OBJECT;
		return false;
	case RBD_OBJ_READ_OBJECT:
		if (*result == -ENOENT && rbd_dev->parent_overlap) {
			/* reverse map this object extent onto the parent */
			ret = rbd_obj_calc_img_extents(obj_req, false);
			if (ret) {
				*result = ret;
				return true;
			}
			if (obj_req->num_img_extents) {
				ret = rbd_obj_read_from_parent(obj_req);
				if (ret) {
					*result = ret;
					return true;
				}
				obj_req->read_state = RBD_OBJ_READ_PARENT;
				return false;
			}
		}

		/*
		 * -ENOENT means a hole in the image -- zero-fill the entire
		 * length of the request.  A short read also implies zero-fill
		 * to the end of the request.
		 */
		if (*result == -ENOENT) {
			rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
			*result = 0;
		} else if (*result >= 0) {
			if (*result < obj_req->ex.oe_len)
				rbd_obj_zero_range(obj_req, *result,
					    obj_req->ex.oe_len - *result);
			else
				rbd_assert(*result == obj_req->ex.oe_len);
			*result = 0;
		}
		return true;
	case RBD_OBJ_READ_PARENT:
		/*
		 * The parent image is read only up to the overlap -- zero-fill
		 * from the overlap to the end of the request.
		 */
		if (!*result) {
			u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);

			if (obj_overlap < obj_req->ex.oe_len)
				rbd_obj_zero_range(obj_req, obj_overlap,
					    obj_req->ex.oe_len - obj_overlap);
		}
		return true;
	default:
		BUG();
	}
}
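
/*
 * Read state machine summary: START -> OBJECT (read from the object
 * itself), then optionally -> PARENT (read the missing range from the
 * parent image); holes and short reads are zero-filled before completion.
 */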
static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;

	if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
	    (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
		dout("%s %p noop for nonexistent\n", __func__, obj_req);
		return true;
	}

	return false;
}
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u8 new_state;

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return 1;

	if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
		new_state = OBJECT_PENDING;
	else
		new_state = OBJECT_EXISTS;

	return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
}
static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
{
	struct ceph_osd_request *osd_req;
	int num_ops = count_write_ops(obj_req);
	int which = 0;
	int ret;

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
		num_ops++; /* stat */

	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
		ret = rbd_osd_setup_stat(osd_req, which++);
		if (ret)
			return ret;
	}

	rbd_osd_setup_write_ops(osd_req, which);
	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}
/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}
#define MODS_ONLY	U32_MAX

static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
				      u32 bytes)
{
	struct ceph_osd_request *osd_req;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(bytes > 0 && bytes != MODS_ONLY);

	osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
	if (ret)
		return ret;

	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}
static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
					u32 bytes)
{
	struct ceph_osd_request *osd_req;
	int num_ops = count_write_ops(obj_req);
	int which = 0;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);

	if (bytes != MODS_ONLY)
		num_ops++; /* copyup */

	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	if (bytes != MODS_ONLY) {
		ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
		if (ret)
			return ret;
	}

	rbd_osd_setup_write_ops(osd_req, which);
	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}
static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
/*
 * The target object doesn't exist.  Read the data for the entire
 * target object up to the overlap point (if any) from the parent,
 * so we can use it for a copyup.
 */
static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Re-submit the original write
		 * request -- pass MODS_ONLY since the copyup isn't needed
		 * anymore.
		 */
		return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	return rbd_obj_read_from_parent(obj_req);
}
static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_snap_context *snapc = obj_req->img_request->snapc;
	u8 new_state;
	u32 i;
	int ret;

	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return;

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
		return;

	for (i = 0; i < snapc->num_snaps; i++) {
		if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
		    i + 1 < snapc->num_snaps)
			new_state = OBJECT_EXISTS_CLEAN;
		else
			new_state = OBJECT_EXISTS;

		ret = rbd_object_map_update(obj_req, snapc->snaps[i],
					    new_state, NULL);
		if (ret < 0) {
			obj_req->pending.result = ret;
			return;
		}

		rbd_assert(!ret);
		obj_req->pending.num_pending++;
	}
}
static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
{
	u32 bytes = rbd_obj_img_extents_bytes(obj_req);
	int ret;

	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
		bytes = 0;

	if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
		/*
		 * Send a copyup request with an empty snapshot context to
		 * deep-copyup the object through all existing snapshots.
		 * A second request with the current snapshot context will be
		 * sent for the actual modification.
		 */
		ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
		if (ret) {
			obj_req->pending.result = ret;
			return;
		}

		obj_req->pending.num_pending++;
		bytes = MODS_ONLY;
	}

	ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
	if (ret) {
		obj_req->pending.result = ret;
		return;
	}

	obj_req->pending.num_pending++;
}
static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->copyup_state) {
	case RBD_OBJ_COPYUP_START:
		rbd_assert(!*result);

		ret = rbd_obj_copyup_read_parent(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		if (obj_req->num_img_extents)
			obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
		else
			obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
		return false;
	case RBD_OBJ_COPYUP_READ_PARENT:
		if (*result)
			return true;

		if (is_zero_bvecs(obj_req->copyup_bvecs,
				  rbd_obj_img_extents_bytes(obj_req))) {
			dout("%s %p detected zeros\n", __func__, obj_req);
			obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
		}

		rbd_obj_copyup_object_maps(obj_req);
		if (!obj_req->pending.num_pending) {
			*result = obj_req->pending.result;
			obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
			goto again;
		}
		obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
		return false;
	case __RBD_OBJ_COPYUP_OBJECT_MAPS:
		if (!pending_result_dec(&obj_req->pending, result))
			return false;
		/* fall through */
	case RBD_OBJ_COPYUP_OBJECT_MAPS:
		if (*result) {
			rbd_warn(rbd_dev, "snap object map update failed: %d",
				 *result);
			return true;
		}

		rbd_obj_copyup_write_object(obj_req);
		if (!obj_req->pending.num_pending) {
			*result = obj_req->pending.result;
			obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
			goto again;
		}
		obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
		return false;
	case __RBD_OBJ_COPYUP_WRITE_OBJECT:
		if (!pending_result_dec(&obj_req->pending, result))
			return false;
		/* fall through */
	case RBD_OBJ_COPYUP_WRITE_OBJECT:
		return true;
	default:
		BUG();
	}
}
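
/*
 * Copyup sub-state machine summary: read the overlapping range from the
 * parent, update the object map for existing snapshots, then issue the
 * copyup and modification OSD request(s); all-zero parent data
 * short-circuits the copyup payload via RBD_OBJ_FLAG_COPYUP_ZEROS.
 */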
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u8 current_state = OBJECT_PENDING;

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return 1;

	if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
		return 1;

	return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
				     &current_state);
}
static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_START:
		rbd_assert(!*result);

		if (rbd_obj_write_is_noop(obj_req))
			return true;

		ret = rbd_obj_write_pre_object_map(obj_req);
		if (ret < 0) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
		if (ret > 0)
			goto again;
		return false;
	case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
		if (*result) {
			rbd_warn(rbd_dev, "pre object map update failed: %d",
				 *result);
			return true;
		}
		ret = rbd_obj_write_object(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
		return false;
	case RBD_OBJ_WRITE_OBJECT:
		if (*result == -ENOENT) {
			if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
				*result = 0;
				obj_req->copyup_state = RBD_OBJ_COPYUP_START;
				obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
				goto again;
			}
			/*
			 * On a non-existent object:
			 *   delete - -ENOENT, truncate/zero - 0
			 */
			if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
				*result = 0;
		}
		if (*result)
			return true;

		obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
		goto again;
	case __RBD_OBJ_WRITE_COPYUP:
		if (!rbd_obj_advance_copyup(obj_req, result))
			return false;
		/* fall through */
	case RBD_OBJ_WRITE_COPYUP:
		if (*result) {
			rbd_warn(rbd_dev, "copyup failed: %d", *result);
			return true;
		}
		ret = rbd_obj_write_post_object_map(obj_req);
		if (ret < 0) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
		if (ret > 0)
			goto again;
		return false;
	case RBD_OBJ_WRITE_POST_OBJECT_MAP:
		if (*result)
			rbd_warn(rbd_dev, "post object map update failed: %d",
				 *result);
		return true;
	default:
		BUG();
	}
}
/*
 * Return true if @obj_req is completed.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
				     int *result)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	bool done;

	mutex_lock(&obj_req->state_mutex);
	if (!rbd_img_is_write(img_req))
		done = rbd_obj_advance_read(obj_req, result);
	else
		done = rbd_obj_advance_write(obj_req, result);
	mutex_unlock(&obj_req->state_mutex);

	if (done && *result) {
		rbd_assert(*result < 0);
		rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
			 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
			 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
	}
	return done;
}
/*
 * This is open-coded in rbd_img_handle_request() to avoid parent chain
 * recursion.
 */
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
{
	if (__rbd_obj_handle_request(obj_req, &result))
		rbd_img_handle_request(obj_req->img_request, result);
}
static bool need_exclusive_lock(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
		return false;

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		return false;

	rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
	if (rbd_dev->opts->lock_on_read ||
	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return true;

	return rbd_img_is_write(img_req);
}
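
/*
 * In short: the exclusive lock is taken for writes, for all I/O when
 * lock_on_read is set, and whenever the object map feature is enabled;
 * snapshots and images without the feature bit never need it.
 */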
static bool rbd_lock_add_request(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	bool locked;

	lockdep_assert_held(&rbd_dev->lock_rwsem);
	locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
	spin_lock(&rbd_dev->lock_lists_lock);
	rbd_assert(list_empty(&img_req->lock_item));
	if (!locked)
		list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
	else
		list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
	spin_unlock(&rbd_dev->lock_lists_lock);

	return locked;
}

static void rbd_lock_del_request(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	bool need_wakeup;

	lockdep_assert_held(&rbd_dev->lock_rwsem);
	spin_lock(&rbd_dev->lock_lists_lock);
	rbd_assert(!list_empty(&img_req->lock_item));
	list_del_init(&img_req->lock_item);
	need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
		       list_empty(&rbd_dev->running_list));
	spin_unlock(&rbd_dev->lock_lists_lock);
	if (need_wakeup)
		complete(&rbd_dev->releasing_wait);
}
static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	if (!need_exclusive_lock(img_req))
		return 1;

	if (rbd_lock_add_request(img_req))
		return 1;

	if (rbd_dev->opts->exclusive) {
		WARN_ON(1); /* lock got released? */
		return -EROFS;
	}

	/*
	 * Note the use of mod_delayed_work() in rbd_acquire_lock()
	 * and cancel_delayed_work() in wake_lock_waiters().
	 */
	dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
	queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	return 0;
}
static void rbd_img_object_requests(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;

	rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);

	for_each_obj_request(img_req, obj_req) {
		int result = 0;

		if (__rbd_obj_handle_request(obj_req, &result)) {
			if (result) {
				img_req->pending.result = result;
				return;
			}
		} else {
			img_req->pending.num_pending++;
		}
	}
}
static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	int ret;

again:
	switch (img_req->state) {
	case RBD_IMG_START:
		rbd_assert(!*result);

		ret = rbd_img_exclusive_lock(img_req);
		if (ret < 0) {
			*result = ret;
			return true;
		}
		img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
		if (ret > 0)
			goto again;
		return false;
	case RBD_IMG_EXCLUSIVE_LOCK:
		if (*result)
			return true;

		rbd_assert(!need_exclusive_lock(img_req) ||
			   __rbd_is_lock_owner(rbd_dev));

		rbd_img_object_requests(img_req);
		if (!img_req->pending.num_pending) {
			*result = img_req->pending.result;
			img_req->state = RBD_IMG_OBJECT_REQUESTS;
			goto again;
		}
		img_req->state = __RBD_IMG_OBJECT_REQUESTS;
		return false;
	case __RBD_IMG_OBJECT_REQUESTS:
		if (!pending_result_dec(&img_req->pending, result))
			return false;
		/* fall through */
	case RBD_IMG_OBJECT_REQUESTS:
		return true;
	default:
		BUG();
	}
}
/*
 * Return true if @img_req is completed.
 */
static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
				     int *result)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	bool done;

	if (need_exclusive_lock(img_req)) {
		down_read(&rbd_dev->lock_rwsem);
		mutex_lock(&img_req->state_mutex);
		done = rbd_img_advance(img_req, result);
		if (done)
			rbd_lock_del_request(img_req);
		mutex_unlock(&img_req->state_mutex);
		up_read(&rbd_dev->lock_rwsem);
	} else {
		mutex_lock(&img_req->state_mutex);
		done = rbd_img_advance(img_req, result);
		mutex_unlock(&img_req->state_mutex);
	}

	if (done && *result) {
		rbd_assert(*result < 0);
		rbd_warn(rbd_dev, "%s%s result %d",
		      test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
		      obj_op_name(img_req->op_type), *result);
	}
	return done;
}
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
{
again:
	if (!__rbd_img_handle_request(img_req, &result))
		return;

	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
		struct rbd_obj_request *obj_req = img_req->obj_request;

		rbd_img_request_put(img_req);
		if (__rbd_obj_handle_request(obj_req, &result)) {
			img_req = obj_req->img_request;
			goto again;
		}
	} else {
		struct request *rq = img_req->rq;

		rbd_img_request_put(img_req);
		blk_mq_end_request(rq, errno_to_blk_status(result));
	}
}
static const struct rbd_client_id rbd_empty_cid;

static bool rbd_cid_equal(const struct rbd_client_id *lhs,
			  const struct rbd_client_id *rhs)
{
	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}

static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
	struct rbd_client_id cid;

	mutex_lock(&rbd_dev->watch_mutex);
	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
	cid.handle = rbd_dev->watch_cookie;
	mutex_unlock(&rbd_dev->watch_mutex);
	return cid;
}
/*
 * lock_rwsem must be held for write
 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
			      const struct rbd_client_id *cid)
{
	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
	     cid->gid, cid->handle);
	rbd_dev->owner_cid = *cid; /* struct */
}

static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
	mutex_lock(&rbd_dev->watch_mutex);
	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
	mutex_unlock(&rbd_dev->watch_mutex);
}

static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);

	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}
/*
 * lock_rwsem must be held for write
 */
static int rbd_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] != '\0');

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
			    RBD_LOCK_TAG, "", 0);
	if (ret)
		return ret;

	__rbd_lock(rbd_dev, cookie);
	return 0;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] == '\0');

	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			      RBD_LOCK_NAME, rbd_dev->lock_cookie);
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock header: %d", ret);

	/* treat errors as the image is unlocked */
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	rbd_dev->lock_cookie[0] = '\0';
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
}
static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
				enum rbd_notify_op notify_op,
				struct page ***preply_pages,
				size_t *preply_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
	char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	void *p = buf;

	dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);

	/* encode *LockPayload NotifyMessage (op + ClientId) */
	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
	ceph_encode_32(&p, notify_op);
	ceph_encode_64(&p, cid.gid);
	ceph_encode_64(&p, cid.handle);

	return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, buf, buf_size,
				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
}

static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
			       enum rbd_notify_op notify_op)
{
	struct page **reply_pages;
	size_t reply_len;

	__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
}

static void rbd_notify_acquired_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  acquired_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}

static void rbd_notify_released_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  released_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}
static int rbd_request_lock(struct rbd_device *rbd_dev)
{
	struct page **reply_pages;
	size_t reply_len;
	bool lock_owner_responded = false;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
				   &reply_pages, &reply_len);
	if (ret && ret != -ETIMEDOUT) {
		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
		goto out;
	}

	if (reply_len > 0 && reply_len <= PAGE_SIZE) {
		void *p = page_address(reply_pages[0]);
		void *const end = p + reply_len;
		u32 n;

		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
		while (n--) {
			u8 struct_v;
			u32 len;

			ceph_decode_need(&p, end, 8 + 8, e_inval);
			p += 8 + 8; /* skip gid and cookie */

			ceph_decode_32_safe(&p, end, len, e_inval);
			if (!len)
				continue;

			if (lock_owner_responded) {
				rbd_warn(rbd_dev,
					 "duplicate lock owners detected");
				ret = -EIO;
				goto out;
			}

			lock_owner_responded = true;
			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
						  &struct_v, &len);
			if (ret) {
				rbd_warn(rbd_dev,
					 "failed to decode ResponseMessage: %d",
					 ret);
				goto e_inval;
			}

			ret = ceph_decode_32(&p);
		}
	}

	if (!lock_owner_responded) {
		rbd_warn(rbd_dev, "no lock owners detected");
		ret = -ETIMEDOUT;
	}

out:
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
/*
 * Either image request state machine(s) or rbd_add_acquire_lock()
 * (i.e. "rbd map").
 */
static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
{
	struct rbd_img_request *img_req;

	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
	lockdep_assert_held_write(&rbd_dev->lock_rwsem);

	cancel_delayed_work(&rbd_dev->lock_dwork);
	if (!completion_done(&rbd_dev->acquire_wait)) {
		rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
			   list_empty(&rbd_dev->running_list));
		rbd_dev->acquire_err = result;
		complete_all(&rbd_dev->acquire_wait);
		return;
	}

	list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
		mutex_lock(&img_req->state_mutex);
		rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
		rbd_img_schedule(img_req, result);
		mutex_unlock(&img_req->state_mutex);
	}

	list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
static int get_lock_owner_info(struct rbd_device *rbd_dev,
			       struct ceph_locker **lockers, u32 *num_lockers)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	u8 lock_type;
	char *lock_tag;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
				 &lock_type, &lock_tag, lockers, num_lockers);
	if (ret)
		return ret;

	if (*num_lockers == 0) {
		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
		goto out;
	}

	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
			 lock_tag);
		ret = -EBUSY;
		goto out;
	}

	if (lock_type == CEPH_CLS_LOCK_SHARED) {
		rbd_warn(rbd_dev, "shared lock type detected");
		ret = -EBUSY;
		goto out;
	}

	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
			 (*lockers)[0].id.cookie);
		ret = -EBUSY;
		goto out;
	}

out:
	kfree(lock_tag);
	return ret;
}
static int find_watcher(struct rbd_device *rbd_dev,
			const struct ceph_locker *locker)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u64 cookie;
	int i;
	int ret;

	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
				      &rbd_dev->header_oloc, &watchers,
				      &num_watchers);
	if (ret)
		return ret;

	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
	for (i = 0; i < num_watchers; i++) {
		if (!memcmp(&watchers[i].addr, &locker->info.addr,
			    sizeof(locker->info.addr)) &&
		    watchers[i].cookie == cookie) {
			struct rbd_client_id cid = {
				.gid = le64_to_cpu(watchers[i].name.num),
				.handle = cookie,
			};

			dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
			     rbd_dev, cid.gid, cid.handle);
			rbd_set_owner_cid(rbd_dev, &cid);
			ret = 1;
			goto out;
		}
	}

	dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
	ret = 0;
out:
	kfree(watchers);
	return ret;
}
/*
 * lock_rwsem must be held for write
 */
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
	struct ceph_client *client = rbd_dev->rbd_client->client;
	struct ceph_locker *lockers;
	u32 num_lockers;
	int ret;

	for (;;) {
		ret = rbd_lock(rbd_dev);
		if (ret != -EBUSY)
			return ret;

		/* determine if the current lock holder is still alive */
		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
		if (ret)
			return ret;

		if (num_lockers == 0)
			goto again;

		ret = find_watcher(rbd_dev, lockers);
		if (ret)
			goto out; /* request lock or error */

		rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
			 ENTITY_NAME(lockers[0].id.name));

		ret = ceph_monc_blacklist_add(&client->monc,
					      &lockers[0].info.addr);
		if (ret) {
			rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
				 ENTITY_NAME(lockers[0].id.name), ret);
			goto out;
		}

		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
					  lockers[0].id.cookie,
					  &lockers[0].id.name);
		if (ret && ret != -ENOENT)
			goto out;

again:
		ceph_free_lockers(lockers, num_lockers);
	}

out:
	ceph_free_lockers(lockers, num_lockers);
	return ret;
}
static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
{
	int ret;

	if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
		ret = rbd_object_map_open(rbd_dev);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Return:
 *   0 - lock acquired
 *   1 - caller should call rbd_request_lock()
 *  <0 - error
 */
static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
{
	int ret;

	down_read(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (__rbd_is_lock_owner(rbd_dev)) {
		up_read(&rbd_dev->lock_rwsem);
		return 0;
	}

	up_read(&rbd_dev->lock_rwsem);
	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (__rbd_is_lock_owner(rbd_dev)) {
		up_write(&rbd_dev->lock_rwsem);
		return 0;
	}

	ret = rbd_try_lock(rbd_dev);
	if (ret < 0) {
		rbd_warn(rbd_dev, "failed to lock header: %d", ret);
		if (ret == -EBLACKLISTED)
			goto out;

		ret = 1; /* request lock anyway */
	}
	if (ret > 0) {
		up_write(&rbd_dev->lock_rwsem);
		return ret;
	}

	rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
	rbd_assert(list_empty(&rbd_dev->running_list));

	ret = rbd_post_acquire_action(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
		/*
		 * Can't stay in RBD_LOCK_STATE_LOCKED because
		 * rbd_lock_add_request() would let the request through,
		 * assuming that e.g. object map is locked and loaded.
		 */
		rbd_unlock(rbd_dev);
	}

out:
	wake_lock_waiters(rbd_dev, ret);
	up_write(&rbd_dev->lock_rwsem);
	return ret;
}
static void rbd_acquire_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, lock_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
	ret = rbd_try_acquire_lock(rbd_dev);
	if (ret <= 0) {
		dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
		return;
	}

	ret = rbd_request_lock(rbd_dev);
	if (ret == -ETIMEDOUT) {
		goto again; /* treat this as a dead client */
	} else if (ret == -EROFS) {
		rbd_warn(rbd_dev, "peer will not release lock");
		down_write(&rbd_dev->lock_rwsem);
		wake_lock_waiters(rbd_dev, ret);
		up_write(&rbd_dev->lock_rwsem);
	} else if (ret < 0) {
		rbd_warn(rbd_dev, "error requesting lock: %d", ret);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
				 RBD_RETRY_DELAY);
	} else {
		/*
		 * lock owner acked, but resend if we don't see them
		 * release the lock
		 */
		dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
		     rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
	}
}
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
{
	bool need_wait;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	lockdep_assert_held_write(&rbd_dev->lock_rwsem);

	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
		return false;

	/*
	 * Ensure that all in-flight IO is flushed.
	 */
	rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
	rbd_assert(!completion_done(&rbd_dev->releasing_wait));
	need_wait = !list_empty(&rbd_dev->running_list);
	downgrade_write(&rbd_dev->lock_rwsem);
	if (need_wait)
		wait_for_completion(&rbd_dev->releasing_wait);
	up_read(&rbd_dev->lock_rwsem);

	down_write(&rbd_dev->lock_rwsem);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
		return false;

	rbd_assert(list_empty(&rbd_dev->running_list));
	return true;
}
static void rbd_pre_release_action(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
		rbd_object_map_close(rbd_dev);
}

static void __rbd_release_lock(struct rbd_device *rbd_dev)
{
	rbd_assert(list_empty(&rbd_dev->running_list));

	rbd_pre_release_action(rbd_dev);
	rbd_unlock(rbd_dev);
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_release_lock(struct rbd_device *rbd_dev)
{
	if (!rbd_quiesce_lock(rbd_dev))
		return;

	__rbd_release_lock(rbd_dev);

	/*
	 * Give others a chance to grab the lock - we would re-acquire
	 * almost immediately if we got new IO while draining the running
	 * list otherwise.  We need to ack our own notifications, so this
	 * lock_dwork will be requeued from rbd_handle_released_lock() by
	 * way of maybe_kick_acquire().
	 */
	cancel_delayed_work(&rbd_dev->lock_dwork);
}

static void rbd_release_lock_work(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  unlock_work);

	down_write(&rbd_dev->lock_rwsem);
	rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}
static void maybe_kick_acquire(struct rbd_device *rbd_dev)
{
	bool have_requests;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	if (__rbd_is_lock_owner(rbd_dev))
		return;

	spin_lock(&rbd_dev->lock_lists_lock);
	have_requests = !list_empty(&rbd_dev->acquiring_list);
	spin_unlock(&rbd_dev->lock_lists_lock);
	if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
		dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	}
}
static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			/*
			 * we already know that the remote client is
			 * the owner
			 */
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	maybe_kick_acquire(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
}
static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
			     __func__, rbd_dev, cid.gid, cid.handle,
			     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	maybe_kick_acquire(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
}
/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
				   void **p)
{
	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
	struct rbd_client_id cid = { 0 };
	int result = 1;

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (rbd_cid_equal(&cid, &my_cid))
		return result;

	down_read(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev)) {
		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
			goto out_unlock;

		/*
		 * encode ResponseMessage(0) so the peer can detect
		 * a missing owner
		 */
		result = 0;

		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
			if (!rbd_dev->opts->exclusive) {
				dout("%s rbd_dev %p queueing unlock_work\n",
				     __func__, rbd_dev);
				queue_work(rbd_dev->task_wq,
					   &rbd_dev->unlock_work);
			} else {
				/* refuse to release the lock */
				result = -EROFS;
			}
		}
	}

out_unlock:
	up_read(&rbd_dev->lock_rwsem);
	return result;
}
static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
				     u64 notify_id, u64 cookie, s32 *result)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char buf[4 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	int ret;

	if (result) {
		void *p = buf;

		/* encode ResponseMessage */
		ceph_start_encoding(&p, 1, 1,
				    buf_size - CEPH_ENCODING_START_BLK_LEN);
		ceph_encode_32(&p, *result);
	} else {
		buf_size = 0;
	}

	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
				   &rbd_dev->header_oloc, notify_id, cookie,
				   buf, buf_size);
	if (ret)
		rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
}

static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
				   u64 cookie)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
}

static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
					  u64 notify_id, u64 cookie, s32 result)
{
	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
}
static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
			 u64 notifier_id, void *data, size_t data_len)
{
	struct rbd_device *rbd_dev = arg;
	void *p = data;
	void *const end = p + data_len;
	u8 struct_v = 0;
	u32 len;
	u32 notify_op;
	int ret;

	dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
	     __func__, rbd_dev, cookie, notify_id, data_len);
	if (data_len) {
		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
					  &struct_v, &len);
		if (ret) {
			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
				 ret);
			return;
		}

		notify_op = ceph_decode_32(&p);
	} else {
		/* legacy notification for header updates */
		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
		len = 0;
	}

	dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
	switch (notify_op) {
	case RBD_NOTIFY_OP_ACQUIRED_LOCK:
		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_RELEASED_LOCK:
		rbd_handle_released_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_REQUEST_LOCK:
		ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
		if (ret <= 0)
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, ret);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_HEADER_UPDATE:
		ret = rbd_dev_refresh(rbd_dev);
		if (ret)
			rbd_warn(rbd_dev, "refresh failed: %d", ret);

		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	default:
		if (rbd_is_lock_owner(rbd_dev))
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, -EOPNOTSUPP);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	}
}
static void __rbd_unregister_watch(struct rbd_device *rbd_dev);

static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
	struct rbd_device *rbd_dev = arg;

	rbd_warn(rbd_dev, "encountered watch error: %d", err);

	down_write(&rbd_dev->lock_rwsem);
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	up_write(&rbd_dev->lock_rwsem);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
		__rbd_unregister_watch(rbd_dev);
		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;

		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
	}
	mutex_unlock(&rbd_dev->watch_mutex);
}
/*
 * watch_mutex must be locked
 */
static int __rbd_register_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_linger_request *handle;

	rbd_assert(!rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, rbd_watch_cb,
				 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	rbd_dev->watch_handle = handle;
	return 0;
}

/*
 * watch_mutex must be locked
 */
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	rbd_assert(rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);

	rbd_dev->watch_handle = NULL;
}

static int rbd_register_watch(struct rbd_device *rbd_dev)
{
	int ret;

	mutex_lock(&rbd_dev->watch_mutex);
	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
	ret = __rbd_register_watch(rbd_dev);
	if (ret)
		goto out;

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;

out:
	mutex_unlock(&rbd_dev->watch_mutex);
	return ret;
}
static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	cancel_work_sync(&rbd_dev->acquired_lock_work);
	cancel_work_sync(&rbd_dev->released_lock_work);
	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
	cancel_work_sync(&rbd_dev->unlock_work);
}

/*
 * header_rwsem must not be held to avoid a deadlock with
 * rbd_dev_refresh() when flushing notifies.
 */
static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	cancel_tasks_sync(rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
		__rbd_unregister_watch(rbd_dev);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	mutex_unlock(&rbd_dev->watch_mutex);

	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
/*
 * lock_rwsem must be held for write
 */
static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	if (!rbd_quiesce_lock(rbd_dev))
		return;

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, RBD_LOCK_NAME,
				  CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
				  RBD_LOCK_TAG, cookie);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
				 ret);

		/*
		 * Lock cookie cannot be updated on older OSDs, so do
		 * a manual release and queue an acquire.
		 */
		__rbd_release_lock(rbd_dev);
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	} else {
		__rbd_lock(rbd_dev, cookie);
		wake_lock_waiters(rbd_dev, 0);
	}
}
static void rbd_reregister_watch(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, watch_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	ret = __rbd_register_watch(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
		if (ret != -EBLACKLISTED && ret != -ENOENT) {
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
			mutex_unlock(&rbd_dev->watch_mutex);
			return;
		}

		mutex_unlock(&rbd_dev->watch_mutex);
		down_write(&rbd_dev->lock_rwsem);
		wake_lock_waiters(rbd_dev, ret);
		up_write(&rbd_dev->lock_rwsem);
		return;
	}

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
	mutex_unlock(&rbd_dev->watch_mutex);

	down_write(&rbd_dev->lock_rwsem);
	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		rbd_reacquire_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct page *req_page = NULL;
	struct page *reply_page;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	if (outbound) {
		if (outbound_size > PAGE_SIZE)
			return -E2BIG;

		req_page = alloc_page(GFP_KERNEL);
		if (!req_page)
			return -ENOMEM;

		memcpy(page_address(req_page), outbound, outbound_size);
	}

	reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		if (req_page)
			__free_page(req_page);
		return -ENOMEM;
	}

	ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
			     CEPH_OSD_FLAG_READ, req_page, outbound_size,
			     &reply_page, &inbound_size);
	if (!ret) {
		memcpy(inbound, page_address(reply_page), inbound_size);
		ret = inbound_size;
	}

	if (req_page)
		__free_page(req_page);
	__free_page(reply_page);
	return ret;
}
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	int result;

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		op_type = OBJ_OP_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		op_type = OBJ_OP_ZEROOUT;
		break;
	case REQ_OP_WRITE:
		op_type = OBJ_OP_WRITE;
		break;
	case REQ_OP_READ:
		op_type = OBJ_OP_READ;
		break;
	default:
		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
		result = -EIO;
		goto err;
	}

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
		rbd_warn(rbd_dev, "%s on read-only snapshot",
			 obj_op_name(op_type));
		result = -EIO;
		goto err_rq;
	}

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
	     img_request, obj_op_name(op_type), offset, length);

	if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
		result = rbd_img_fill_nodata(img_request, offset, length);
	else
		result = rbd_img_fill_from_bio(img_request, offset, length,
					       rq->bio);
	if (result)
		goto err_img_request;

	rbd_img_handle_request(img_request, 0);
	return;

err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, errno_to_blk_status(result));
}
static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_STS_OK;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	blk_cleanup_queue(rbd_dev->disk->queue);
	blk_mq_free_tag_set(&rbd_dev->tag_set);
	put_disk(rbd_dev->disk);
	rbd_dev->disk = NULL;
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     void *buf, int buf_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages = calc_pages_for(0, buf_len);
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_req;
	}

	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
					 true);

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0)
		ceph_copy_from_page_vector(pages, buf, 0, ret);

out_req:
	ceph_osdc_put_request(req);
	return ret;
}
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
					&rbd_dev->header_oloc, ondisk, size);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}
static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static const struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.init_request	= rbd_init_request,
};
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	unsigned int objset_bytes =
	    rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, USHRT_MAX);
	blk_queue_max_segment_size(q, UINT_MAX);
	blk_queue_io_min(q, rbd_dev->opts->alloc_size);
	blk_queue_io_opt(q, rbd_dev->opts->alloc_size);

	if (rbd_dev->opts->trim) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.discard_granularity = rbd_dev->opts->alloc_size;
		blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
		blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
	}

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

	/*
	 * disk_release() expects a queue ref from add_disk() and will
	 * put it.  Hold an extra ref until add_disk() is called.
	 */
	WARN_ON(!blk_get_queue(q));
	disk->queue = q;
	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}
static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct ceph_entity_addr *client_addr =
	    ceph_client_addr(rbd_dev->rbd_client->client);

	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
		       le32_to_cpu(client_addr->nonce));
}
static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}

static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return sprintf(buf, "%s\n", rbd_dev->config_info);
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_pool_ns_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}
static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "pool_ns %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->pool_ns ?: "",
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}
static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_addr.attr,
	&dev_attr_client_id.attr,
	&dev_attr_cluster_fsid.attr,
	&dev_attr_config_info.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_pool_ns.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};
static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static const struct device_type rbd_device_type = {
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->pool_ns);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
static void rbd_dev_free(struct rbd_device *rbd_dev)
{
	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);

	ceph_oid_destroy(&rbd_dev->header_oid);
	ceph_oloc_destroy(&rbd_dev->header_oloc);
	kfree(rbd_dev->config_info);

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}
static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->header.data_pool_id = CEPH_NOPOOL;
	ceph_oid_init(&rbd_dev->header_oid);
	rbd_dev->header_oloc.pool = spec->pool_id;
	if (spec->pool_ns) {
		WARN_ON(!*spec->pool_ns);
		rbd_dev->header_oloc.pool_ns =
		    ceph_find_or_create_string(spec->pool_ns,
					       strlen(spec->pool_ns));
	}

	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

	init_rwsem(&rbd_dev->lock_rwsem);
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
	spin_lock_init(&rbd_dev->lock_lists_lock);
	INIT_LIST_HEAD(&rbd_dev->acquiring_list);
	INIT_LIST_HEAD(&rbd_dev->running_list);
	init_completion(&rbd_dev->acquire_wait);
	init_completion(&rbd_dev->releasing_wait);

	spin_lock_init(&rbd_dev->object_map_lock);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	return rbd_dev;
}
/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;

	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_size",
				  &snapid, sizeof(snapid),
				  &size_buf, sizeof(size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	size_t size;
	void *reply_buf;
	void *p;
	int ret;

	/* Response will be an encoded string, which includes a length */
	size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_object_prefix",
				  NULL, 0, reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				     u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_features",
				  &snapid, sizeof(snapid),
				  &features_buf, sizeof(features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
					 &rbd_dev->header.features);
}
/*
 * These are generic image flags, but since they are used only for
 * object map, store them in rbd_dev->object_map_flags.
 *
 * For the same reason, this function is called only on object map
 * (re)load and not on header refresh.
 */
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
{
	__le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	__le64 flags;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_flags",
				  &snapid, sizeof(snapid),
				  &flags, sizeof(flags));
	if (ret < 0)
		return ret;
	if (ret < sizeof(flags))
		return -EBADMSG;

	rbd_dev->object_map_flags = le64_to_cpu(flags);
	return 0;
}
struct parent_image_info {
	u64		pool_id;
	const char	*pool_ns;
	const char	*image_id;
	u64		snap_id;

	bool		has_overlap;
	u64		overlap;
};

/*
 * The caller is responsible for @pii.
 */
static int decode_parent_image_spec(void **p, void *end,
				    struct parent_image_info *pii)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
	pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->pool_ns)) {
		ret = PTR_ERR(pii->pool_ns);
		pii->pool_ns = NULL;
		return ret;
	}
	pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->image_id)) {
		ret = PTR_ERR(pii->image_id);
		pii->image_id = NULL;
		return ret;
	}
	ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
	return 0;

e_inval:
	return -EINVAL;
}
static int __get_parent_info(struct rbd_device *rbd_dev,
			     struct page *req_page,
			     struct page *reply_page,
			     struct parent_image_info *pii)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	size_t reply_len = PAGE_SIZE;
	void *p, *end;
	int ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "parent_get", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), &reply_page, &reply_len);
	if (ret)
		return ret == -EOPNOTSUPP ? 1 : ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ret = decode_parent_image_spec(&p, end, pii);
	if (ret)
		return ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), &reply_page, &reply_len);
	if (ret)
		return ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
	if (pii->has_overlap)
		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

	return 0;

e_inval:
	return -EINVAL;
}
/*
 * The caller is responsible for @pii.
 */
static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
				    struct page *req_page,
				    struct page *reply_page,
				    struct parent_image_info *pii)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	size_t reply_len = PAGE_SIZE;
	void *p, *end;
	int ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "get_parent", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), &reply_page, &reply_len);
	if (ret)
		return ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
	pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->image_id)) {
		ret = PTR_ERR(pii->image_id);
		pii->image_id = NULL;
		return ret;
	}
	ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
	pii->has_overlap = true;
	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

	return 0;

e_inval:
	return -EINVAL;
}
static int get_parent_info(struct rbd_device *rbd_dev,
			   struct parent_image_info *pii)
{
	struct page *req_page, *reply_page;
	void *p;
	int ret;

	req_page = alloc_page(GFP_KERNEL);
	if (!req_page)
		return -ENOMEM;

	reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		__free_page(req_page);
		return -ENOMEM;
	}

	p = page_address(req_page);
	ceph_encode_64(&p, rbd_dev->spec->snap_id);
	ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
	if (ret > 0)
		ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
					       pii);

	__free_page(req_page);
	__free_page(reply_page);
	return ret;
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	struct parent_image_info pii = { 0 };
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	ret = get_parent_info(rbd_dev, &pii);
	if (ret)
		goto out_err;

	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
	     __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
	     pii.has_overlap, pii.overlap);

	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 *
		 * If !pii.has_overlap, the parent image spec is not
		 * applicable.  It's there to avoid duplication in each
		 * snapshot record.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pii.pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pii.pool_id, U32_MAX);
		goto out_err;
	}

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pii.pool_id;
		if (pii.pool_ns && *pii.pool_ns) {
			parent_spec->pool_ns = pii.pool_ns;
			pii.pool_ns = NULL;
		}
		parent_spec->image_id = pii.image_id;
		pii.image_id = NULL;
		parent_spec->snap_id = pii.snap_id;

		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!pii.overlap) {
		if (rbd_dev->parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = pii.overlap;

out:
	ret = 0;
out_err:
	kfree(pii.pool_ns);
	kfree(pii.image_id);
	rbd_spec_put(parent_spec);
	return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, "get_stripe_unit_count",
				NULL, 0, &striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	p = &striping_info_buf;
	rbd_dev->header.stripe_unit = ceph_decode_64(&p);
	rbd_dev->header.stripe_count = ceph_decode_64(&p);
	return 0;
}
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
	__le64 data_pool_id;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_data_pool",
				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
	if (ret < 0)
		return ret;
	if (ret < sizeof(data_pool_id))
		return -EBADMSG;

	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
	return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	CEPH_DEFINE_OID_ONSTACK(oid);
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "dir_get_name", image_id, image_id_size,
				  reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	u32 which;
	bool found = false;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}
/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;
	int ret;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapcontext",
				  NULL, 0, reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	void *p;
	void *end;
	char *snap_name;
	int ret;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapshot_name",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any). Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	* These are the characters that produce nonzero for
	* isspace() in the "C" and "POSIX" locales.
	*/
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);   /* Return token length */
}
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
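
/*
 * Illustrative sketch (not from the original source): given a buffer such
 * as "1.2.3.4:6789 name=admin rbd foo", repeated dup_token() calls peel
 * off "1.2.3.4:6789", "name=admin", "rbd" and "foo" in turn, advancing
 * *buf past each token.  This is how rbd_add_parse_args() below walks the
 * add string.
 */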
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * storage:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
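/*
 * Illustrative sketch (not from the original source): a hypothetical add
 * request following the format above might look like
 *
 *	echo "1.2.3.4:6789 name=admin,secret=AQB... rbd myimage" \
 *		> /sys/bus/rbd/add
 *
 * which would map the head of image "myimage" from pool "rbd" using the
 * monitor at 1.2.3.4:6789; the addresses, names and key here are made up.
 */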
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct parse_rbd_opts_ctx pctx = { 0 };
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	pctx.spec = rbd_spec_alloc();
	if (!pctx.spec)
		goto out_mem;

	pctx.spec->pool_name = dup_token(&buf, NULL);
	if (!pctx.spec->pool_name)
		goto out_mem;
	if (!*pctx.spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	pctx.spec->image_name = dup_token(&buf, NULL);
	if (!pctx.spec->image_name)
		goto out_mem;
	if (!*pctx.spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	pctx.spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
	if (!pctx.opts)
		goto out_mem;

	pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
	pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
	pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
	pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
	pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
	pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
	pctx.opts->trim = RBD_TRIM_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
				   mon_addrs + mon_addrs_size - 1,
				   parse_rbd_opts_token, &pctx);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = pctx.opts;
	*rbd_spec = pctx.spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(pctx.opts);
	rbd_spec_put(pctx.spec);
	kfree(options);

	return ret;
}
static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
{
	down_write(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev))
		__rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}
/*
 * If the wait is interrupted, an error is returned even if the lock
 * was successfully acquired.  rbd_dev_image_unlock() will release it
 * if needed.
 */
static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
	long ret;

	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
		if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
			return 0;

		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
		return -EINVAL;
	}

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		return 0;

	rbd_assert(!rbd_is_lock_owner(rbd_dev));
	queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
			    ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
	if (ret > 0) {
		ret = rbd_dev->acquire_err;
	} else {
		cancel_delayed_work_sync(&rbd_dev->lock_dwork);
		if (!ret)
			ret = -ETIMEDOUT;
	}

	if (ret) {
		rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
		return ret;
	}

	/*
	 * The lock may have been released by now, unless automatic lock
	 * transitions are disabled.
	 */
	rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
	return 0;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
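/*
 * Illustrative sketch (not from the original source), assuming the usual
 * "rbd_id." and "rbd_header." object name prefixes from rbd_types.h: for
 * a format 2 image named "foo" the id object is roughly "rbd_id.foo"; its
 * contents (say "abc123") then yield the header object name
 * "rbd_header.abc123", which is what rbd_dev_header_name() builds from
 * RBD_HEADER_PREFIX and the image id.
 */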
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	CEPH_DEFINE_OID_ONSTACK(oid);
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
			       rbd_dev->spec->image_name);
	if (ret)
		return ret;

	dout("rbd id object name is %s\n", oid.name);

	/* Response will be an encoded string, which includes a length */
	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "get_id", NULL, 0,
				  response, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	ceph_oid_destroy(&oid);
	return ret;
}
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header	*header;

	rbd_dev_parent_put(rbd_dev);
	rbd_object_map_free(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		return ret;

	/*
	 * Get the and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
		ret = rbd_dev_v2_data_pool(rbd_dev);
		if (ret)
			goto out_err;
	}

	rbd_init_layout(rbd_dev);
	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_free_disk(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}
/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);

	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	if (ret)
		goto err_out_disk;

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);
	return 0;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_unregister_watch(rbd_dev);

	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 *
 * On success, returns with header_rwsem held for write if called
 * with @depth == 0.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s%s%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->pool_ns ?: "",
					rbd_dev->spec->pool_ns ? "/" : "",
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	if (!depth)
		down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_probe;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s%s%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->pool_ns ?: "",
				rbd_dev->spec->pool_ns ? "/" : "",
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_probe;

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP &&
	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
		ret = rbd_object_map_load(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	if (!depth)
		up_write(&rbd_dev->header_rwsem);
	if (!depth)
		rbd_unregister_watch(rbd_dev);
	rbd_dev_unprobe(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		rbd_dev->opts->read_only = true;

	if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
		rbd_warn(rbd_dev, "alloc_size adjusted to %u",
			 rbd_dev->layout.object_size);
		rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
	}

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	rc = rbd_add_acquire_lock(rbd_dev);
	if (rc)
		goto err_out_image_lock;

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	add_disk(rbd_dev->disk);
	/* see rbd_init_disk() */
	blk_put_queue(rbd_dev->disk->queue);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
        if (single_major)
                return -EINVAL;

        return do_rbd_add(bus, buf, count);
}

static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
                                      size_t count)
{
        return do_rbd_add(bus, buf, count);
}
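
/*
 * For reference, a map request reaches do_rbd_add() through a write to
 * the bus attribute, roughly of the form (the exact option syntax is
 * described in Documentation/ABI/testing/sysfs-bus-rbd):
 *
 *   $ echo "192.168.0.1 name=admin rbd foo" > /sys/bus/rbd/add
 *
 * i.e. "<mon addr(s)> <options> <pool name> <image name> [<snap name>]".
 */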

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
        while (rbd_dev->parent) {
                struct rbd_device *first = rbd_dev;
                struct rbd_device *second = first->parent;
                struct rbd_device *third;

                /*
                 * Follow to the parent with no grandparent and
                 * remove it.
                 */
                while (second && (third = second->parent)) {
                        first = second;
                        second = third;
                }
                rbd_assert(second);
                rbd_dev_image_release(second);
                rbd_dev_destroy(second);
                first->parent = NULL;
                first->parent_overlap = 0;

                rbd_assert(first->parent_spec);
                rbd_spec_put(first->parent_spec);
                first->parent_spec = NULL;
        }
}
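
/*
 * For illustration: if the mapped device sits at the bottom of a clone
 * chain child -> parent -> base, the inner loop above walks up to "base"
 * (the ancestor with no parent of its own) and releases it first; the
 * next pass of the outer loop then releases "parent", tearing the chain
 * down from the top.
 */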

static ssize_t do_rbd_remove(struct bus_type *bus,
                             const char *buf,
                             size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct list_head *tmp;
        int dev_id;
        char opt_buf[6];
        bool force = false;
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        dev_id = -1;
        opt_buf[0] = '\0';
        sscanf(buf, "%d %5s", &dev_id, opt_buf);
        if (dev_id < 0) {
                pr_err("dev_id out of range\n");
                return -EINVAL;
        }
        if (opt_buf[0] != '\0') {
                if (!strcmp(opt_buf, "force")) {
                        force = true;
                } else {
                        pr_err("bad remove option at '%s'\n", opt_buf);
                        return -EINVAL;
                }
        }

        ret = -ENOENT;
        spin_lock(&rbd_dev_list_lock);
        list_for_each(tmp, &rbd_dev_list) {
                rbd_dev = list_entry(tmp, struct rbd_device, node);
                if (rbd_dev->dev_id == dev_id) {
                        ret = 0;
                        break;
                }
        }
        if (!ret) {
                spin_lock_irq(&rbd_dev->lock);
                if (rbd_dev->open_count && !force)
                        ret = -EBUSY;
                else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
                                          &rbd_dev->flags))
                        ret = -EINPROGRESS;
                spin_unlock_irq(&rbd_dev->lock);
        }
        spin_unlock(&rbd_dev_list_lock);
        if (ret)
                return ret;

        if (force) {
                /*
                 * Prevent new IO from being queued and wait for existing
                 * IO to complete/fail.
                 */
                blk_mq_freeze_queue(rbd_dev->disk->queue);
                blk_set_queue_dying(rbd_dev->disk->queue);
        }

        del_gendisk(rbd_dev->disk);
        spin_lock(&rbd_dev_list_lock);
        list_del_init(&rbd_dev->node);
        spin_unlock(&rbd_dev_list_lock);
        device_del(&rbd_dev->dev);

        rbd_dev_image_unlock(rbd_dev);
        rbd_dev_device_release(rbd_dev);
        rbd_dev_image_release(rbd_dev);
        rbd_dev_destroy(rbd_dev);
        return count;
}

static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
{
        if (single_major)
                return -EINVAL;

        return do_rbd_remove(bus, buf, count);
}

static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
                                         size_t count)
{
        return do_rbd_remove(bus, buf, count);
}
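
/*
 * For reference, unmap requests arrive here as a write of the device id,
 * optionally followed by the "force" keyword parsed in do_rbd_remove():
 *
 *   $ echo 0 > /sys/bus/rbd/remove
 *   $ echo "0 force" > /sys/bus/rbd/remove
 */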

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
        int ret;

        ret = device_register(&rbd_root_dev);
        if (ret < 0)
                return ret;

        ret = bus_register(&rbd_bus_type);
        if (ret < 0)
                device_unregister(&rbd_root_dev);

        return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
        bus_unregister(&rbd_bus_type);
        device_unregister(&rbd_root_dev);
}

static int __init rbd_slab_init(void)
{
        rbd_assert(!rbd_img_request_cache);
        rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
        if (!rbd_img_request_cache)
                return -ENOMEM;

        rbd_assert(!rbd_obj_request_cache);
        rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
        if (!rbd_obj_request_cache)
                goto out_err;

        return 0;

out_err:
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
        return -ENOMEM;
}

static void rbd_slab_exit(void)
{
        rbd_assert(rbd_obj_request_cache);
        kmem_cache_destroy(rbd_obj_request_cache);
        rbd_obj_request_cache = NULL;

        rbd_assert(rbd_img_request_cache);
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
        int rc;

        if (!libceph_compatible(NULL)) {
                rbd_warn(NULL, "libceph incompatibility (quitting)");
                return -EINVAL;
        }

        rc = rbd_slab_init();
        if (rc)
                return rc;

        /*
         * The number of active work items is limited by the number of
         * rbd devices * queue depth, so leave @max_active at default.
         */
        rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
        if (!rbd_wq) {
                rc = -ENOMEM;
                goto err_out_slab;
        }

        if (single_major) {
                rbd_major = register_blkdev(0, RBD_DRV_NAME);
                if (rbd_major < 0) {
                        rc = rbd_major;
                        goto err_out_wq;
                }
        }

        rc = rbd_sysfs_init();
        if (rc)
                goto err_out_blkdev;

        if (single_major)
                pr_info("loaded (major %d)\n", rbd_major);
        else
                pr_info("loaded\n");

        return 0;

err_out_blkdev:
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
        destroy_workqueue(rbd_wq);
err_out_slab:
        rbd_slab_exit();
        return rc;
}

static void __exit rbd_exit(void)
{
        ida_destroy(&rbd_dev_id_ida);
        rbd_sysfs_cleanup();
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
        destroy_workqueue(rbd_wq);
        rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");