/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
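/*
 * Illustrative sketch (not part of the original file): how the two helpers
 * above are meant to guard a refcount-like atomic against overflow and
 * underflow.  The "example_ref" counter and both helpers below are
 * hypothetical.
 */
static atomic_t example_ref = ATOMIC_INIT(0);

static bool __maybe_unused example_ref_get(void)
{
	/* > 0 means the count was really incremented (no wrap, not dead) */
	return atomic_inc_return_safe(&example_ref) > 0;
}

static void __maybe_unused example_ref_put(void)
{
	if (atomic_dec_return_safe(&example_ref) < 0)
		pr_warn("example_ref underflow\n");
}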
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
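/*
 * Illustrative sketch (not part of the original file): a feature mask like
 * RBD_FEATURES_SUPPORTED is typically consumed by refusing to map an image
 * that requires any bit this client does not implement.  The helper below
 * is hypothetical; the driver performs an equivalent check when it fetches
 * the image features from the OSD.
 */
static int __maybe_unused example_check_features(u64 image_features)
{
	u64 unsupported = image_features & ~RBD_FEATURES_SUPPORTED;

	if (unsupported) {
		pr_err("image uses unsupported features 0x%llx\n", unsupported);
		return -ENXIO;
	}
	return 0;
}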
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)
enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *   RBD_OBJ_WRITE_GUARD --> RBD_OBJ_WRITE_READ_FROM_PARENT -->
 *   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC --> RBD_OBJ_WRITE_COPYUP_OPS --> done
 *
 *   (a flattened image, or a write for which deep-copyup is not needed,
 *   short-circuits parts of this path; the original ASCII diagram of the
 *   state machine is elided here)
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};
struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};
/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
};
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_READONLY,	/* -o ro or snapshot */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
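/*
 * Illustrative sketch (not part of the original file): with
 * RBD_SINGLE_MAJOR_PART_SHIFT == 4 each mapped device owns a block of 16
 * minors under the shared major, e.g. dev_id 3 covers minors 48..63
 * (rbd3, rbd3p1, ...).  The helper below only demonstrates the round trip.
 */
static void __maybe_unused example_single_major_layout(void)
{
	int dev_id = 3;
	int first_minor = rbd_dev_id_to_minor(dev_id);	/* 3 << 4 == 48 */

	WARN_ON(minor_to_rbd_dev_id(first_minor + 15) != dev_id);
}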
static bool rbd_is_ro(struct rbd_device *rbd_dev)
{
	return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
}

static bool rbd_is_snap(struct rbd_device *rbd_dev)
{
	return rbd_dev->spec->snap_id != CEPH_NOSNAP;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);
static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}
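/*
 * Illustrative sketch (not part of the original file): pending_result is a
 * tiny completion counter.  A parent arms it with the number of children it
 * dispatches, each child calls pending_result_dec() with its own result, and
 * only the final decrement reports back, carrying the first nonzero result.
 * The numbers below are made up.
 */
static void __maybe_unused example_pending_result(void)
{
	struct pending_result pending = { .result = 0, .num_pending = 3 };
	int child_results[] = { 0, -EIO, 0 };
	int i, result;

	for (i = 0; i < 3; i++) {
		result = child_results[i];
		if (pending_result_dec(&pending, &result))
			pr_info("all done, first error %d\n", result); /* -EIO */
	}
}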
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/*
	 * Both images mapped read-only and snapshots can't be marked
	 * read-write.
	 */
	if (!ro) {
		if (rbd_is_ro(rbd_dev))
			return -EROFS;

		rbd_assert(!rbd_is_snap(rbd_dev));
	}

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	/* int args above */
	Opt_pool_ns,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
};

static const struct fs_parameter_spec rbd_parameters[] = {
	fsparam_u32	("alloc_size",			Opt_alloc_size),
	fsparam_flag	("exclusive",			Opt_exclusive),
	fsparam_flag	("lock_on_read",		Opt_lock_on_read),
	fsparam_u32	("lock_timeout",		Opt_lock_timeout),
	fsparam_flag	("notrim",			Opt_notrim),
	fsparam_string	("_pool_ns",			Opt_pool_ns),
	fsparam_u32	("queue_depth",			Opt_queue_depth),
	fsparam_flag	("read_only",			Opt_read_only),
	fsparam_flag	("read_write",			Opt_read_write),
	fsparam_flag	("ro",				Opt_read_only),
	fsparam_flag	("rw",				Opt_read_write),
	{}
};

struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct rbd_parse_opts_ctx {
	struct rbd_spec		*spec;
	struct ceph_options	*copts;
	struct rbd_options	*opts;
};
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t size;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire the snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
1033 * Fill an rbd image header with information from the given format 1
1036 static int rbd_header_from_disk(struct rbd_device
*rbd_dev
,
1037 struct rbd_image_header_ondisk
*ondisk
)
1039 struct rbd_image_header
*header
= &rbd_dev
->header
;
1040 bool first_time
= header
->object_prefix
== NULL
;
1041 struct ceph_snap_context
*snapc
;
1042 char *object_prefix
= NULL
;
1043 char *snap_names
= NULL
;
1044 u64
*snap_sizes
= NULL
;
1049 /* Allocate this now to avoid having to handle failure below */
1052 object_prefix
= kstrndup(ondisk
->object_prefix
,
1053 sizeof(ondisk
->object_prefix
),
1059 /* Allocate the snapshot context and fill it in */
1061 snap_count
= le32_to_cpu(ondisk
->snap_count
);
1062 snapc
= ceph_create_snap_context(snap_count
, GFP_KERNEL
);
1065 snapc
->seq
= le64_to_cpu(ondisk
->snap_seq
);
1067 struct rbd_image_snap_ondisk
*snaps
;
1068 u64 snap_names_len
= le64_to_cpu(ondisk
->snap_names_len
);
1070 /* We'll keep a copy of the snapshot names... */
1072 if (snap_names_len
> (u64
)SIZE_MAX
)
1074 snap_names
= kmalloc(snap_names_len
, GFP_KERNEL
);
1078 /* ...as well as the array of their sizes. */
1079 snap_sizes
= kmalloc_array(snap_count
,
1080 sizeof(*header
->snap_sizes
),
1086 * Copy the names, and fill in each snapshot's id
1089 * Note that rbd_dev_v1_header_info() guarantees the
1090 * ondisk buffer we're working with has
1091 * snap_names_len bytes beyond the end of the
1092 * snapshot id array, this memcpy() is safe.
1094 memcpy(snap_names
, &ondisk
->snaps
[snap_count
], snap_names_len
);
1095 snaps
= ondisk
->snaps
;
1096 for (i
= 0; i
< snap_count
; i
++) {
1097 snapc
->snaps
[i
] = le64_to_cpu(snaps
[i
].id
);
1098 snap_sizes
[i
] = le64_to_cpu(snaps
[i
].image_size
);
1102 /* We won't fail any more, fill in the header */
1105 header
->object_prefix
= object_prefix
;
1106 header
->obj_order
= ondisk
->options
.order
;
1107 rbd_init_layout(rbd_dev
);
1109 ceph_put_snap_context(header
->snapc
);
1110 kfree(header
->snap_names
);
1111 kfree(header
->snap_sizes
);
1114 /* The remaining fields always get updated (when we refresh) */
1116 header
->image_size
= le64_to_cpu(ondisk
->image_size
);
1117 header
->snapc
= snapc
;
1118 header
->snap_names
= snap_names
;
1119 header
->snap_sizes
= snap_sizes
;
1127 ceph_put_snap_context(snapc
);
1128 kfree(object_prefix
);
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is in kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
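/*
 * Illustrative sketch (not part of the original file): the snapshot id array
 * in the snap context is sorted in descending order, which is why bsearch()
 * needs the reversed comparator above.  With snaps[] = { 40, 30, 10 },
 * looking up id 30 lands on index 1; an absent id makes rbd_dev_snap_index()
 * return BAD_SNAP_INDEX.  The local array below is made up.
 */
static void __maybe_unused example_snapid_bsearch(void)
{
	u64 snaps[] = { 40, 30, 10 };	/* descending, like snapc->snaps */
	u64 key = 30;
	u64 *found = bsearch(&key, snaps, ARRAY_SIZE(snaps), sizeof(key),
			     snapid_compare_reverse);

	WARN_ON(!found || found - snaps != 1);
}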
1183 static const char *rbd_dev_v1_snap_name(struct rbd_device
*rbd_dev
,
1187 const char *snap_name
;
1189 which
= rbd_dev_snap_index(rbd_dev
, snap_id
);
1190 if (which
== BAD_SNAP_INDEX
)
1191 return ERR_PTR(-ENOENT
);
1193 snap_name
= _rbd_dev_v1_snap_name(rbd_dev
, which
);
1194 return snap_name
? snap_name
: ERR_PTR(-ENOMEM
);
1197 static const char *rbd_snap_name(struct rbd_device
*rbd_dev
, u64 snap_id
)
1199 if (snap_id
== CEPH_NOSNAP
)
1200 return RBD_SNAP_HEAD_NAME
;
1202 rbd_assert(rbd_image_format_valid(rbd_dev
->image_format
));
1203 if (rbd_dev
->image_format
== 1)
1204 return rbd_dev_v1_snap_name(rbd_dev
, snap_id
);
1206 return rbd_dev_v2_snap_name(rbd_dev
, snap_id
);
1209 static int rbd_snap_size(struct rbd_device
*rbd_dev
, u64 snap_id
,
1212 rbd_assert(rbd_image_format_valid(rbd_dev
->image_format
));
1213 if (snap_id
== CEPH_NOSNAP
) {
1214 *snap_size
= rbd_dev
->header
.image_size
;
1215 } else if (rbd_dev
->image_format
== 1) {
1218 which
= rbd_dev_snap_index(rbd_dev
, snap_id
);
1219 if (which
== BAD_SNAP_INDEX
)
1222 *snap_size
= rbd_dev
->header
.snap_sizes
[which
];
1227 ret
= _rbd_dev_v2_snap_size(rbd_dev
, snap_id
, NULL
, &size
);
1236 static int rbd_dev_mapping_set(struct rbd_device
*rbd_dev
)
1238 u64 snap_id
= rbd_dev
->spec
->snap_id
;
1242 ret
= rbd_snap_size(rbd_dev
, snap_id
, &size
);
1246 rbd_dev
->mapping
.size
= size
;
1250 static void rbd_dev_mapping_clear(struct rbd_device
*rbd_dev
)
1252 rbd_dev
->mapping
.size
= 0;
1255 static void zero_bvec(struct bio_vec
*bv
)
1258 unsigned long flags
;
1260 buf
= bvec_kmap_irq(bv
, &flags
);
1261 memset(buf
, 0, bv
->bv_len
);
1262 flush_dcache_page(bv
->bv_page
);
1263 bvec_kunmap_irq(buf
, &flags
);
1266 static void zero_bios(struct ceph_bio_iter
*bio_pos
, u32 off
, u32 bytes
)
1268 struct ceph_bio_iter it
= *bio_pos
;
1270 ceph_bio_iter_advance(&it
, off
);
1271 ceph_bio_iter_advance_step(&it
, bytes
, ({
1276 static void zero_bvecs(struct ceph_bvec_iter
*bvec_pos
, u32 off
, u32 bytes
)
1278 struct ceph_bvec_iter it
= *bvec_pos
;
1280 ceph_bvec_iter_advance(&it
, off
);
1281 ceph_bvec_iter_advance_step(&it
, bytes
, ({
1287 * Zero a range in @obj_req data buffer defined by a bio (list) or
1288 * (private) bio_vec array.
1290 * @off is relative to the start of the data buffer.
1292 static void rbd_obj_zero_range(struct rbd_obj_request
*obj_req
, u32 off
,
1295 dout("%s %p data buf %u~%u\n", __func__
, obj_req
, off
, bytes
);
1297 switch (obj_req
->img_request
->data_type
) {
1298 case OBJ_REQUEST_BIO
:
1299 zero_bios(&obj_req
->bio_pos
, off
, bytes
);
1301 case OBJ_REQUEST_BVECS
:
1302 case OBJ_REQUEST_OWN_BVECS
:
1303 zero_bvecs(&obj_req
->bvec_pos
, off
, bytes
);
1310 static void rbd_obj_request_destroy(struct kref
*kref
);
1311 static void rbd_obj_request_put(struct rbd_obj_request
*obj_request
)
1313 rbd_assert(obj_request
!= NULL
);
1314 dout("%s: obj %p (was %d)\n", __func__
, obj_request
,
1315 kref_read(&obj_request
->kref
));
1316 kref_put(&obj_request
->kref
, rbd_obj_request_destroy
);
1319 static inline void rbd_img_obj_request_add(struct rbd_img_request
*img_request
,
1320 struct rbd_obj_request
*obj_request
)
1322 rbd_assert(obj_request
->img_request
== NULL
);
1324 /* Image request now owns object's original reference */
1325 obj_request
->img_request
= img_request
;
1326 dout("%s: img %p obj %p\n", __func__
, img_request
, obj_request
);
1329 static inline void rbd_img_obj_request_del(struct rbd_img_request
*img_request
,
1330 struct rbd_obj_request
*obj_request
)
1332 dout("%s: img %p obj %p\n", __func__
, img_request
, obj_request
);
1333 list_del(&obj_request
->ex
.oe_item
);
1334 rbd_assert(obj_request
->img_request
== img_request
);
1335 rbd_obj_request_put(obj_request
);
1338 static void rbd_osd_submit(struct ceph_osd_request
*osd_req
)
1340 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
1342 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1343 __func__
, osd_req
, obj_req
, obj_req
->ex
.oe_objno
,
1344 obj_req
->ex
.oe_off
, obj_req
->ex
.oe_len
);
1345 ceph_osdc_start_request(osd_req
->r_osdc
, osd_req
, false);
1349 * The default/initial value for all image request flags is 0. Each
1350 * is conditionally set to 1 at image request initialization time
1351 * and currently never change thereafter.
1353 static void img_request_layered_set(struct rbd_img_request
*img_request
)
1355 set_bit(IMG_REQ_LAYERED
, &img_request
->flags
);
1358 static bool img_request_layered_test(struct rbd_img_request
*img_request
)
1360 return test_bit(IMG_REQ_LAYERED
, &img_request
->flags
) != 0;
1363 static bool rbd_obj_is_entire(struct rbd_obj_request
*obj_req
)
1365 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
1367 return !obj_req
->ex
.oe_off
&&
1368 obj_req
->ex
.oe_len
== rbd_dev
->layout
.object_size
;
1371 static bool rbd_obj_is_tail(struct rbd_obj_request
*obj_req
)
1373 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
1375 return obj_req
->ex
.oe_off
+ obj_req
->ex
.oe_len
==
1376 rbd_dev
->layout
.object_size
;
1380 * Must be called after rbd_obj_calc_img_extents().
1382 static bool rbd_obj_copyup_enabled(struct rbd_obj_request
*obj_req
)
1384 if (!obj_req
->num_img_extents
||
1385 (rbd_obj_is_entire(obj_req
) &&
1386 !obj_req
->img_request
->snapc
->num_snaps
))
1392 static u64
rbd_obj_img_extents_bytes(struct rbd_obj_request
*obj_req
)
1394 return ceph_file_extents_bytes(obj_req
->img_extents
,
1395 obj_req
->num_img_extents
);
1398 static bool rbd_img_is_write(struct rbd_img_request
*img_req
)
1400 switch (img_req
->op_type
) {
1404 case OBJ_OP_DISCARD
:
1405 case OBJ_OP_ZEROOUT
:
1412 static void rbd_osd_req_callback(struct ceph_osd_request
*osd_req
)
1414 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
1417 dout("%s osd_req %p result %d for obj_req %p\n", __func__
, osd_req
,
1418 osd_req
->r_result
, obj_req
);
1421 * Writes aren't allowed to return a data payload. In some
1422 * guarded write cases (e.g. stat + zero on an empty object)
1423 * a stat response makes it through, but we don't care.
1425 if (osd_req
->r_result
> 0 && rbd_img_is_write(obj_req
->img_request
))
1428 result
= osd_req
->r_result
;
1430 rbd_obj_handle_request(obj_req
, result
);
1433 static void rbd_osd_format_read(struct ceph_osd_request
*osd_req
)
1435 struct rbd_obj_request
*obj_request
= osd_req
->r_priv
;
1437 osd_req
->r_flags
= CEPH_OSD_FLAG_READ
;
1438 osd_req
->r_snapid
= obj_request
->img_request
->snap_id
;
1441 static void rbd_osd_format_write(struct ceph_osd_request
*osd_req
)
1443 struct rbd_obj_request
*obj_request
= osd_req
->r_priv
;
1445 osd_req
->r_flags
= CEPH_OSD_FLAG_WRITE
;
1446 ktime_get_real_ts64(&osd_req
->r_mtime
);
1447 osd_req
->r_data_offset
= obj_request
->ex
.oe_off
;
1450 static struct ceph_osd_request
*
1451 __rbd_obj_add_osd_request(struct rbd_obj_request
*obj_req
,
1452 struct ceph_snap_context
*snapc
, int num_ops
)
1454 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
1455 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1456 struct ceph_osd_request
*req
;
1457 const char *name_format
= rbd_dev
->image_format
== 1 ?
1458 RBD_V1_DATA_FORMAT
: RBD_V2_DATA_FORMAT
;
1461 req
= ceph_osdc_alloc_request(osdc
, snapc
, num_ops
, false, GFP_NOIO
);
1463 return ERR_PTR(-ENOMEM
);
1465 list_add_tail(&req
->r_private_item
, &obj_req
->osd_reqs
);
1466 req
->r_callback
= rbd_osd_req_callback
;
1467 req
->r_priv
= obj_req
;
1470 * Data objects may be stored in a separate pool, but always in
1471 * the same namespace in that pool as the header in its pool.
1473 ceph_oloc_copy(&req
->r_base_oloc
, &rbd_dev
->header_oloc
);
1474 req
->r_base_oloc
.pool
= rbd_dev
->layout
.pool_id
;
1476 ret
= ceph_oid_aprintf(&req
->r_base_oid
, GFP_NOIO
, name_format
,
1477 rbd_dev
->header
.object_prefix
,
1478 obj_req
->ex
.oe_objno
);
1480 return ERR_PTR(ret
);
1485 static struct ceph_osd_request
*
1486 rbd_obj_add_osd_request(struct rbd_obj_request
*obj_req
, int num_ops
)
1488 return __rbd_obj_add_osd_request(obj_req
, obj_req
->img_request
->snapc
,
1492 static struct rbd_obj_request
*rbd_obj_request_create(void)
1494 struct rbd_obj_request
*obj_request
;
1496 obj_request
= kmem_cache_zalloc(rbd_obj_request_cache
, GFP_NOIO
);
1500 ceph_object_extent_init(&obj_request
->ex
);
1501 INIT_LIST_HEAD(&obj_request
->osd_reqs
);
1502 mutex_init(&obj_request
->state_mutex
);
1503 kref_init(&obj_request
->kref
);
1505 dout("%s %p\n", __func__
, obj_request
);
1509 static void rbd_obj_request_destroy(struct kref
*kref
)
1511 struct rbd_obj_request
*obj_request
;
1512 struct ceph_osd_request
*osd_req
;
1515 obj_request
= container_of(kref
, struct rbd_obj_request
, kref
);
1517 dout("%s: obj %p\n", __func__
, obj_request
);
1519 while (!list_empty(&obj_request
->osd_reqs
)) {
1520 osd_req
= list_first_entry(&obj_request
->osd_reqs
,
1521 struct ceph_osd_request
, r_private_item
);
1522 list_del_init(&osd_req
->r_private_item
);
1523 ceph_osdc_put_request(osd_req
);
1526 switch (obj_request
->img_request
->data_type
) {
1527 case OBJ_REQUEST_NODATA
:
1528 case OBJ_REQUEST_BIO
:
1529 case OBJ_REQUEST_BVECS
:
1530 break; /* Nothing to do */
1531 case OBJ_REQUEST_OWN_BVECS
:
1532 kfree(obj_request
->bvec_pos
.bvecs
);
1538 kfree(obj_request
->img_extents
);
1539 if (obj_request
->copyup_bvecs
) {
1540 for (i
= 0; i
< obj_request
->copyup_bvec_count
; i
++) {
1541 if (obj_request
->copyup_bvecs
[i
].bv_page
)
1542 __free_page(obj_request
->copyup_bvecs
[i
].bv_page
);
1544 kfree(obj_request
->copyup_bvecs
);
1547 kmem_cache_free(rbd_obj_request_cache
, obj_request
);
1550 /* It's OK to call this for a device with no parent */
1552 static void rbd_spec_put(struct rbd_spec
*spec
);
1553 static void rbd_dev_unparent(struct rbd_device
*rbd_dev
)
1555 rbd_dev_remove_parent(rbd_dev
);
1556 rbd_spec_put(rbd_dev
->parent_spec
);
1557 rbd_dev
->parent_spec
= NULL
;
1558 rbd_dev
->parent_overlap
= 0;
1562 * Parent image reference counting is used to determine when an
1563 * image's parent fields can be safely torn down--after there are no
1564 * more in-flight requests to the parent image. When the last
1565 * reference is dropped, cleaning them up is safe.
1567 static void rbd_dev_parent_put(struct rbd_device
*rbd_dev
)
1571 if (!rbd_dev
->parent_spec
)
1574 counter
= atomic_dec_return_safe(&rbd_dev
->parent_ref
);
1578 /* Last reference; clean up parent data structures */
1581 rbd_dev_unparent(rbd_dev
);
1583 rbd_warn(rbd_dev
, "parent reference underflow");
1587 * If an image has a non-zero parent overlap, get a reference to its
1590 * Returns true if the rbd device has a parent with a non-zero
1591 * overlap and a reference for it was successfully taken, or
1594 static bool rbd_dev_parent_get(struct rbd_device
*rbd_dev
)
1598 if (!rbd_dev
->parent_spec
)
1601 if (rbd_dev
->parent_overlap
)
1602 counter
= atomic_inc_return_safe(&rbd_dev
->parent_ref
);
1605 rbd_warn(rbd_dev
, "parent reference overflow");
1610 static void rbd_img_request_init(struct rbd_img_request
*img_request
,
1611 struct rbd_device
*rbd_dev
,
1612 enum obj_operation_type op_type
)
1614 memset(img_request
, 0, sizeof(*img_request
));
1616 img_request
->rbd_dev
= rbd_dev
;
1617 img_request
->op_type
= op_type
;
1619 INIT_LIST_HEAD(&img_request
->lock_item
);
1620 INIT_LIST_HEAD(&img_request
->object_extents
);
1621 mutex_init(&img_request
->state_mutex
);
1624 static void rbd_img_capture_header(struct rbd_img_request
*img_req
)
1626 struct rbd_device
*rbd_dev
= img_req
->rbd_dev
;
1628 lockdep_assert_held(&rbd_dev
->header_rwsem
);
1630 if (rbd_img_is_write(img_req
))
1631 img_req
->snapc
= ceph_get_snap_context(rbd_dev
->header
.snapc
);
1633 img_req
->snap_id
= rbd_dev
->spec
->snap_id
;
1635 if (rbd_dev_parent_get(rbd_dev
))
1636 img_request_layered_set(img_req
);
1639 static void rbd_img_request_destroy(struct rbd_img_request
*img_request
)
1641 struct rbd_obj_request
*obj_request
;
1642 struct rbd_obj_request
*next_obj_request
;
1644 dout("%s: img %p\n", __func__
, img_request
);
1646 WARN_ON(!list_empty(&img_request
->lock_item
));
1647 for_each_obj_request_safe(img_request
, obj_request
, next_obj_request
)
1648 rbd_img_obj_request_del(img_request
, obj_request
);
1650 if (img_request_layered_test(img_request
))
1651 rbd_dev_parent_put(img_request
->rbd_dev
);
1653 if (rbd_img_is_write(img_request
))
1654 ceph_put_snap_context(img_request
->snapc
);
1656 if (test_bit(IMG_REQ_CHILD
, &img_request
->flags
))
1657 kmem_cache_free(rbd_img_request_cache
, img_request
);
#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}

static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}

static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}

static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	spin_unlock(&rbd_dev->object_map_lock);
	return state;
}
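/*
 * Illustrative sketch (not part of the original file): the object map packs
 * four 2-bit object states into each byte, most significant pair first.
 * For objno 5: index = 5 / 4 = 1 and shift = (4 - 1 - 1) * 2 = 4, so the
 * state lives in bits 5:4 of byte 1.  The buffer below is local and
 * hypothetical; the real map is rbd_dev->object_map.
 */
static void __maybe_unused example_object_map_packing(void)
{
	u8 map[2] = {};
	u64 objno = 5;
	u64 index = objno / OBJS_PER_BYTE;	/* 1 */
	u8 shift = (OBJS_PER_BYTE - (objno % OBJS_PER_BYTE) - 1) * BITS_PER_OBJ; /* 4 */

	map[index] |= (u8)(OBJECT_EXISTS << shift);
	WARN_ON(((map[index] >> shift) & OBJ_MASK) != OBJECT_EXISTS);
}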
1708 static bool use_object_map(struct rbd_device
*rbd_dev
)
1711 * An image mapped read-only can't use the object map -- it isn't
1712 * loaded because the header lock isn't acquired. Someone else can
1713 * write to the image and update the object map behind our back.
1715 * A snapshot can't be written to, so using the object map is always
1718 if (!rbd_is_snap(rbd_dev
) && rbd_is_ro(rbd_dev
))
1721 return ((rbd_dev
->header
.features
& RBD_FEATURE_OBJECT_MAP
) &&
1722 !(rbd_dev
->object_map_flags
& RBD_FLAG_OBJECT_MAP_INVALID
));
1725 static bool rbd_object_map_may_exist(struct rbd_device
*rbd_dev
, u64 objno
)
1729 /* fall back to default logic if object map is disabled or invalid */
1730 if (!use_object_map(rbd_dev
))
1733 state
= rbd_object_map_get(rbd_dev
, objno
);
1734 return state
!= OBJECT_NONEXISTENT
;
1737 static void rbd_object_map_name(struct rbd_device
*rbd_dev
, u64 snap_id
,
1738 struct ceph_object_id
*oid
)
1740 if (snap_id
== CEPH_NOSNAP
)
1741 ceph_oid_printf(oid
, "%s%s", RBD_OBJECT_MAP_PREFIX
,
1742 rbd_dev
->spec
->image_id
);
1744 ceph_oid_printf(oid
, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX
,
1745 rbd_dev
->spec
->image_id
, snap_id
);
1748 static int rbd_object_map_lock(struct rbd_device
*rbd_dev
)
1750 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1751 CEPH_DEFINE_OID_ONSTACK(oid
);
1754 struct ceph_locker
*lockers
;
1756 bool broke_lock
= false;
1759 rbd_object_map_name(rbd_dev
, CEPH_NOSNAP
, &oid
);
1762 ret
= ceph_cls_lock(osdc
, &oid
, &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
1763 CEPH_CLS_LOCK_EXCLUSIVE
, "", "", "", 0);
1764 if (ret
!= -EBUSY
|| broke_lock
) {
1766 ret
= 0; /* already locked by myself */
1768 rbd_warn(rbd_dev
, "failed to lock object map: %d", ret
);
1772 ret
= ceph_cls_lock_info(osdc
, &oid
, &rbd_dev
->header_oloc
,
1773 RBD_LOCK_NAME
, &lock_type
, &lock_tag
,
1774 &lockers
, &num_lockers
);
1779 rbd_warn(rbd_dev
, "failed to get object map lockers: %d", ret
);
1784 if (num_lockers
== 0)
1787 rbd_warn(rbd_dev
, "breaking object map lock owned by %s%llu",
1788 ENTITY_NAME(lockers
[0].id
.name
));
1790 ret
= ceph_cls_break_lock(osdc
, &oid
, &rbd_dev
->header_oloc
,
1791 RBD_LOCK_NAME
, lockers
[0].id
.cookie
,
1792 &lockers
[0].id
.name
);
1793 ceph_free_lockers(lockers
, num_lockers
);
1798 rbd_warn(rbd_dev
, "failed to break object map lock: %d", ret
);
1806 static void rbd_object_map_unlock(struct rbd_device
*rbd_dev
)
1808 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1809 CEPH_DEFINE_OID_ONSTACK(oid
);
1812 rbd_object_map_name(rbd_dev
, CEPH_NOSNAP
, &oid
);
1814 ret
= ceph_cls_unlock(osdc
, &oid
, &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
1816 if (ret
&& ret
!= -ENOENT
)
1817 rbd_warn(rbd_dev
, "failed to unlock object map: %d", ret
);
1820 static int decode_object_map_header(void **p
, void *end
, u64
*object_map_size
)
1828 ceph_decode_32_safe(p
, end
, header_len
, e_inval
);
1829 header_end
= *p
+ header_len
;
1831 ret
= ceph_start_decoding(p
, end
, 1, "BitVector header", &struct_v
,
1836 ceph_decode_64_safe(p
, end
, *object_map_size
, e_inval
);
1845 static int __rbd_object_map_load(struct rbd_device
*rbd_dev
)
1847 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1848 CEPH_DEFINE_OID_ONSTACK(oid
);
1849 struct page
**pages
;
1853 u64 object_map_bytes
;
1854 u64 object_map_size
;
1858 rbd_assert(!rbd_dev
->object_map
&& !rbd_dev
->object_map_size
);
1860 num_objects
= ceph_get_num_objects(&rbd_dev
->layout
,
1861 rbd_dev
->mapping
.size
);
1862 object_map_bytes
= DIV_ROUND_UP_ULL(num_objects
* BITS_PER_OBJ
,
1864 num_pages
= calc_pages_for(0, object_map_bytes
) + 1;
1865 pages
= ceph_alloc_page_vector(num_pages
, GFP_KERNEL
);
1867 return PTR_ERR(pages
);
1869 reply_len
= num_pages
* PAGE_SIZE
;
1870 rbd_object_map_name(rbd_dev
, rbd_dev
->spec
->snap_id
, &oid
);
1871 ret
= ceph_osdc_call(osdc
, &oid
, &rbd_dev
->header_oloc
,
1872 "rbd", "object_map_load", CEPH_OSD_FLAG_READ
,
1873 NULL
, 0, pages
, &reply_len
);
1877 p
= page_address(pages
[0]);
1878 end
= p
+ min(reply_len
, (size_t)PAGE_SIZE
);
1879 ret
= decode_object_map_header(&p
, end
, &object_map_size
);
1883 if (object_map_size
!= num_objects
) {
1884 rbd_warn(rbd_dev
, "object map size mismatch: %llu vs %llu",
1885 object_map_size
, num_objects
);
1890 if (offset_in_page(p
) + object_map_bytes
> reply_len
) {
1895 rbd_dev
->object_map
= kvmalloc(object_map_bytes
, GFP_KERNEL
);
1896 if (!rbd_dev
->object_map
) {
1901 rbd_dev
->object_map_size
= object_map_size
;
1902 ceph_copy_from_page_vector(pages
, rbd_dev
->object_map
,
1903 offset_in_page(p
), object_map_bytes
);
1906 ceph_release_page_vector(pages
, num_pages
);
1910 static void rbd_object_map_free(struct rbd_device
*rbd_dev
)
1912 kvfree(rbd_dev
->object_map
);
1913 rbd_dev
->object_map
= NULL
;
1914 rbd_dev
->object_map_size
= 0;
1917 static int rbd_object_map_load(struct rbd_device
*rbd_dev
)
1921 ret
= __rbd_object_map_load(rbd_dev
);
1925 ret
= rbd_dev_v2_get_flags(rbd_dev
);
1927 rbd_object_map_free(rbd_dev
);
1931 if (rbd_dev
->object_map_flags
& RBD_FLAG_OBJECT_MAP_INVALID
)
1932 rbd_warn(rbd_dev
, "object map is invalid");
1937 static int rbd_object_map_open(struct rbd_device
*rbd_dev
)
1941 ret
= rbd_object_map_lock(rbd_dev
);
1945 ret
= rbd_object_map_load(rbd_dev
);
1947 rbd_object_map_unlock(rbd_dev
);
1954 static void rbd_object_map_close(struct rbd_device
*rbd_dev
)
1956 rbd_object_map_free(rbd_dev
);
1957 rbd_object_map_unlock(rbd_dev
);
1961 * This function needs snap_id (or more precisely just something to
1962 * distinguish between HEAD and snapshot object maps), new_state and
1963 * current_state that were passed to rbd_object_map_update().
1965 * To avoid allocating and stashing a context we piggyback on the OSD
1966 * request. A HEAD update has two ops (assert_locked). For new_state
1967 * and current_state we decode our own object_map_update op, encoded in
1968 * rbd_cls_object_map_update().
1970 static int rbd_object_map_update_finish(struct rbd_obj_request
*obj_req
,
1971 struct ceph_osd_request
*osd_req
)
1973 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
1974 struct ceph_osd_data
*osd_data
;
1976 u8 state
, new_state
, uninitialized_var(current_state
);
1977 bool has_current_state
;
1980 if (osd_req
->r_result
)
1981 return osd_req
->r_result
;
1984 * Nothing to do for a snapshot object map.
1986 if (osd_req
->r_num_ops
== 1)
1990 * Update in-memory HEAD object map.
1992 rbd_assert(osd_req
->r_num_ops
== 2);
1993 osd_data
= osd_req_op_data(osd_req
, 1, cls
, request_data
);
1994 rbd_assert(osd_data
->type
== CEPH_OSD_DATA_TYPE_PAGES
);
1996 p
= page_address(osd_data
->pages
[0]);
1997 objno
= ceph_decode_64(&p
);
1998 rbd_assert(objno
== obj_req
->ex
.oe_objno
);
1999 rbd_assert(ceph_decode_64(&p
) == objno
+ 1);
2000 new_state
= ceph_decode_8(&p
);
2001 has_current_state
= ceph_decode_8(&p
);
2002 if (has_current_state
)
2003 current_state
= ceph_decode_8(&p
);
2005 spin_lock(&rbd_dev
->object_map_lock
);
2006 state
= __rbd_object_map_get(rbd_dev
, objno
);
2007 if (!has_current_state
|| current_state
== state
||
2008 (current_state
== OBJECT_EXISTS
&& state
== OBJECT_EXISTS_CLEAN
))
2009 __rbd_object_map_set(rbd_dev
, objno
, new_state
);
2010 spin_unlock(&rbd_dev
->object_map_lock
);
2015 static void rbd_object_map_callback(struct ceph_osd_request
*osd_req
)
2017 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
2020 dout("%s osd_req %p result %d for obj_req %p\n", __func__
, osd_req
,
2021 osd_req
->r_result
, obj_req
);
2023 result
= rbd_object_map_update_finish(obj_req
, osd_req
);
2024 rbd_obj_handle_request(obj_req
, result
);
2027 static bool update_needed(struct rbd_device
*rbd_dev
, u64 objno
, u8 new_state
)
2029 u8 state
= rbd_object_map_get(rbd_dev
, objno
);
2031 if (state
== new_state
||
2032 (new_state
== OBJECT_PENDING
&& state
== OBJECT_NONEXISTENT
) ||
2033 (new_state
== OBJECT_NONEXISTENT
&& state
!= OBJECT_PENDING
))
2039 static int rbd_cls_object_map_update(struct ceph_osd_request
*req
,
2040 int which
, u64 objno
, u8 new_state
,
2041 const u8
*current_state
)
2043 struct page
**pages
;
2047 ret
= osd_req_op_cls_init(req
, which
, "rbd", "object_map_update");
2051 pages
= ceph_alloc_page_vector(1, GFP_NOIO
);
2053 return PTR_ERR(pages
);
2055 p
= start
= page_address(pages
[0]);
2056 ceph_encode_64(&p
, objno
);
2057 ceph_encode_64(&p
, objno
+ 1);
2058 ceph_encode_8(&p
, new_state
);
2059 if (current_state
) {
2060 ceph_encode_8(&p
, 1);
2061 ceph_encode_8(&p
, *current_state
);
2063 ceph_encode_8(&p
, 0);
2066 osd_req_op_cls_request_data_pages(req
, which
, pages
, p
- start
, 0,
2073 * 0 - object map update sent
2074 * 1 - object map update isn't needed
2077 static int rbd_object_map_update(struct rbd_obj_request
*obj_req
, u64 snap_id
,
2078 u8 new_state
, const u8
*current_state
)
2080 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
2081 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
2082 struct ceph_osd_request
*req
;
2087 if (snap_id
== CEPH_NOSNAP
) {
2088 if (!update_needed(rbd_dev
, obj_req
->ex
.oe_objno
, new_state
))
2091 num_ops
++; /* assert_locked */
2094 req
= ceph_osdc_alloc_request(osdc
, NULL
, num_ops
, false, GFP_NOIO
);
2098 list_add_tail(&req
->r_private_item
, &obj_req
->osd_reqs
);
2099 req
->r_callback
= rbd_object_map_callback
;
2100 req
->r_priv
= obj_req
;
2102 rbd_object_map_name(rbd_dev
, snap_id
, &req
->r_base_oid
);
2103 ceph_oloc_copy(&req
->r_base_oloc
, &rbd_dev
->header_oloc
);
2104 req
->r_flags
= CEPH_OSD_FLAG_WRITE
;
2105 ktime_get_real_ts64(&req
->r_mtime
);
2107 if (snap_id
== CEPH_NOSNAP
) {
2109 * Protect against possible race conditions during lock
2110 * ownership transitions.
2112 ret
= ceph_cls_assert_locked(req
, which
++, RBD_LOCK_NAME
,
2113 CEPH_CLS_LOCK_EXCLUSIVE
, "", "");
2118 ret
= rbd_cls_object_map_update(req
, which
, obj_req
->ex
.oe_objno
,
2119 new_state
, current_state
);
2123 ret
= ceph_osdc_alloc_messages(req
, GFP_NOIO
);
2127 ceph_osdc_start_request(osdc
, req
, false);
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
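/*
 * Illustrative sketch (not part of the original file): with a parent overlap
 * of 0x300000, an extent list of { 0x100000+0x100000, 0x280000+0x100000,
 * 0x400000+0x80000 } is pruned to two extents and the second one is trimmed
 * to 0x280000+0x80000.  The extents below are made up.
 */
static void __maybe_unused example_prune_extents(void)
{
	struct ceph_file_extent ex[] = {
		{ .fe_off = 0x100000, .fe_len = 0x100000 },
		{ .fe_off = 0x280000, .fe_len = 0x100000 },
		{ .fe_off = 0x400000, .fe_len = 0x80000 },
	};
	u32 cnt = ARRAY_SIZE(ex);

	prune_extents(ex, &cnt, 0x300000);
	WARN_ON(cnt != 2 || ex[1].fe_len != 0x80000);
}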
2152 * Determine the byte range(s) covered by either just the object extent
2153 * or the entire object in the parent image.
2155 static int rbd_obj_calc_img_extents(struct rbd_obj_request
*obj_req
,
2158 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
2161 if (!rbd_dev
->parent_overlap
)
2164 ret
= ceph_extent_to_file(&rbd_dev
->layout
, obj_req
->ex
.oe_objno
,
2165 entire
? 0 : obj_req
->ex
.oe_off
,
2166 entire
? rbd_dev
->layout
.object_size
:
2168 &obj_req
->img_extents
,
2169 &obj_req
->num_img_extents
);
2173 prune_extents(obj_req
->img_extents
, &obj_req
->num_img_extents
,
2174 rbd_dev
->parent_overlap
);
2178 static void rbd_osd_setup_data(struct ceph_osd_request
*osd_req
, int which
)
2180 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
2182 switch (obj_req
->img_request
->data_type
) {
2183 case OBJ_REQUEST_BIO
:
2184 osd_req_op_extent_osd_data_bio(osd_req
, which
,
2186 obj_req
->ex
.oe_len
);
2188 case OBJ_REQUEST_BVECS
:
2189 case OBJ_REQUEST_OWN_BVECS
:
2190 rbd_assert(obj_req
->bvec_pos
.iter
.bi_size
==
2191 obj_req
->ex
.oe_len
);
2192 rbd_assert(obj_req
->bvec_idx
== obj_req
->bvec_count
);
2193 osd_req_op_extent_osd_data_bvec_pos(osd_req
, which
,
2194 &obj_req
->bvec_pos
);
2201 static int rbd_osd_setup_stat(struct ceph_osd_request
*osd_req
, int which
)
2203 struct page
**pages
;
2206 * The response data for a STAT call consists of:
2213 pages
= ceph_alloc_page_vector(1, GFP_NOIO
);
2215 return PTR_ERR(pages
);
2217 osd_req_op_init(osd_req
, which
, CEPH_OSD_OP_STAT
, 0);
2218 osd_req_op_raw_data_in_pages(osd_req
, which
, pages
,
2219 8 + sizeof(struct ceph_timespec
),
2224 static int rbd_osd_setup_copyup(struct ceph_osd_request
*osd_req
, int which
,
2227 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
2230 ret
= osd_req_op_cls_init(osd_req
, which
, "rbd", "copyup");
2234 osd_req_op_cls_request_data_bvecs(osd_req
, which
, obj_req
->copyup_bvecs
,
2235 obj_req
->copyup_bvec_count
, bytes
);
2239 static int rbd_obj_init_read(struct rbd_obj_request
*obj_req
)
2241 obj_req
->read_state
= RBD_OBJ_READ_START
;
2245 static void __rbd_osd_setup_write_ops(struct ceph_osd_request
*osd_req
,
2248 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
2249 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
2252 if (!use_object_map(rbd_dev
) ||
2253 !(obj_req
->flags
& RBD_OBJ_FLAG_MAY_EXIST
)) {
2254 osd_req_op_alloc_hint_init(osd_req
, which
++,
2255 rbd_dev
->layout
.object_size
,
2256 rbd_dev
->layout
.object_size
);
2259 if (rbd_obj_is_entire(obj_req
))
2260 opcode
= CEPH_OSD_OP_WRITEFULL
;
2262 opcode
= CEPH_OSD_OP_WRITE
;
2264 osd_req_op_extent_init(osd_req
, which
, opcode
,
2265 obj_req
->ex
.oe_off
, obj_req
->ex
.oe_len
, 0, 0);
2266 rbd_osd_setup_data(osd_req
, which
);
2269 static int rbd_obj_init_write(struct rbd_obj_request
*obj_req
)
2273 /* reverse map the entire object onto the parent */
2274 ret
= rbd_obj_calc_img_extents(obj_req
, true);
2278 if (rbd_obj_copyup_enabled(obj_req
))
2279 obj_req
->flags
|= RBD_OBJ_FLAG_COPYUP_ENABLED
;
2281 obj_req
->write_state
= RBD_OBJ_WRITE_START
;
2285 static u16
truncate_or_zero_opcode(struct rbd_obj_request
*obj_req
)
2287 return rbd_obj_is_tail(obj_req
) ? CEPH_OSD_OP_TRUNCATE
:
2291 static void __rbd_osd_setup_discard_ops(struct ceph_osd_request
*osd_req
,
2294 struct rbd_obj_request
*obj_req
= osd_req
->r_priv
;
2296 if (rbd_obj_is_entire(obj_req
) && !obj_req
->num_img_extents
) {
2297 rbd_assert(obj_req
->flags
& RBD_OBJ_FLAG_DELETION
);
2298 osd_req_op_init(osd_req
, which
, CEPH_OSD_OP_DELETE
, 0);
2300 osd_req_op_extent_init(osd_req
, which
,
2301 truncate_or_zero_opcode(obj_req
),
2302 obj_req
->ex
.oe_off
, obj_req
->ex
.oe_len
,
static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off, next_off;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
				      rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;

		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		obj_req->ex.oe_off = off;
		obj_req->ex.oe_len = next_off - off;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
		obj_req->flags |= RBD_OBJ_FLAG_DELETION;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
				osd_req_op_init(osd_req, which++,
						CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
			osd_req_op_init(osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else {
		opcode = truncate_or_zero_opcode(obj_req);
	}

	if (opcode)
		osd_req_op_extent_init(osd_req, which, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
}
static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
	if (!obj_req->num_img_extents) {
		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
		if (rbd_obj_is_entire(obj_req))
			obj_req->flags |= RBD_OBJ_FLAG_DELETION;
	}

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
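/*
 * Number of OSD ops needed for the data portion of a write-like request.
 * Must stay in sync with rbd_osd_setup_write_ops() below.
 */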
static int count_write_ops(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	switch (img_req->op_type) {
	case OBJ_OP_WRITE:
		if (!use_object_map(img_req->rbd_dev) ||
		    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
			return 2; /* setallochint + write/writefull */

		return 1; /* write/writefull */
	case OBJ_OP_DISCARD:
		return 1; /* delete/truncate/zero */
	case OBJ_OP_ZEROOUT:
		if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
		    !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
			return 2; /* create + truncate */

		return 1; /* delete/truncate/zero */
	default:
		BUG();
	}
}
static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				    int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->op_type) {
	case OBJ_OP_WRITE:
		__rbd_osd_setup_write_ops(osd_req, which);
		break;
	case OBJ_OP_DISCARD:
		__rbd_osd_setup_discard_ops(osd_req, which);
		break;
	case OBJ_OP_ZEROOUT:
		__rbd_osd_setup_zeroout_ops(osd_req, which);
		break;
	default:
		BUG();
	}
}
/*
 * Prune the list of object requests (adjust offset and/or length, drop
 * redundant requests).  Prepare object request state machines and image
 * request state machine for execution.
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req, *next_obj_req;
	int ret;

	for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_init_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_init_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_init_discard(obj_req);
			break;
		case OBJ_OP_ZEROOUT:
			ret = rbd_obj_init_zeroout(obj_req);
			break;
		default:
			BUG();
		}
		if (ret < 0)
			return ret;
		if (ret > 0) {
			rbd_img_obj_request_del(img_req, obj_req);
			continue;
		}
	}

	img_req->state = RBD_IMG_START;
	return 0;
}
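/*
 * Machinery for mapping image extents onto object requests.  The fill
 * context carries the data position iterator and the callbacks used to
 * set, count and copy bio_vecs for each object extent.
 */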
union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};
static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}

/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy = {};
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}
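/* bio-based data positioning callbacks for rbd_img_fill_ctx */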
static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}
static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}
static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}
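/* bio_vec-based data positioning callbacks for rbd_img_fill_ctx */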
static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}
static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}
static void rbd_img_handle_request_work(struct work_struct *work)
{
	struct rbd_img_request *img_req =
	    container_of(work, struct rbd_img_request, work);

	rbd_img_handle_request(img_req, img_req->work_result);
}

static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
{
	INIT_WORK(&img_req->work, rbd_img_handle_request_work);
	img_req->work_result = result;
	queue_work(rbd_wq, &img_req->work);
}
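/*
 * Consult the object map (if enabled) to decide whether this object may
 * exist.  When the object is known not to exist, the read path can treat
 * it as absent without issuing an OSD read.
 */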
static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
		return true;
	}

	dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
	     obj_req->ex.oe_objno);
	return false;
}
static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
{
	struct ceph_osd_request *osd_req;
	int ret;

	osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, 0);
	rbd_osd_format_read(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}
static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *parent = img_req->rbd_dev->parent;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!child_img_req)
		return -ENOMEM;

	rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	down_read(&parent->header_rwsem);
	rbd_img_capture_header(child_img_req);
	up_read(&parent->header_rwsem);

	dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
	     obj_req);

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			BUG();
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_destroy(child_img_req);
		return ret;
	}

	/* avoid parent chain recursion */
	rbd_img_schedule(child_img_req, 0);
	return 0;
}
2873 static bool rbd_obj_advance_read(struct rbd_obj_request
*obj_req
, int *result
)
2875 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
2879 switch (obj_req
->read_state
) {
2880 case RBD_OBJ_READ_START
:
2881 rbd_assert(!*result
);
2883 if (!rbd_obj_may_exist(obj_req
)) {
2885 obj_req
->read_state
= RBD_OBJ_READ_OBJECT
;
2889 ret
= rbd_obj_read_object(obj_req
);
2894 obj_req
->read_state
= RBD_OBJ_READ_OBJECT
;
2896 case RBD_OBJ_READ_OBJECT
:
2897 if (*result
== -ENOENT
&& rbd_dev
->parent_overlap
) {
2898 /* reverse map this object extent onto the parent */
2899 ret
= rbd_obj_calc_img_extents(obj_req
, false);
2904 if (obj_req
->num_img_extents
) {
2905 ret
= rbd_obj_read_from_parent(obj_req
);
2910 obj_req
->read_state
= RBD_OBJ_READ_PARENT
;
2916 * -ENOENT means a hole in the image -- zero-fill the entire
2917 * length of the request. A short read also implies zero-fill
2918 * to the end of the request.
2920 if (*result
== -ENOENT
) {
2921 rbd_obj_zero_range(obj_req
, 0, obj_req
->ex
.oe_len
);
2923 } else if (*result
>= 0) {
2924 if (*result
< obj_req
->ex
.oe_len
)
2925 rbd_obj_zero_range(obj_req
, *result
,
2926 obj_req
->ex
.oe_len
- *result
);
2928 rbd_assert(*result
== obj_req
->ex
.oe_len
);
2932 case RBD_OBJ_READ_PARENT
:
2934 * The parent image is read only up to the overlap -- zero-fill
2935 * from the overlap to the end of the request.
2938 u32 obj_overlap
= rbd_obj_img_extents_bytes(obj_req
);
2940 if (obj_overlap
< obj_req
->ex
.oe_len
)
2941 rbd_obj_zero_range(obj_req
, obj_overlap
,
2942 obj_req
->ex
.oe_len
- obj_overlap
);
static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;

	if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
	    (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
		dout("%s %p noop for nonexistent\n", __func__, obj_req);
		return true;
	}

	return false;
}
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u8 new_state;

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return 1;

	if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
		new_state = OBJECT_PENDING;
	else
		new_state = OBJECT_EXISTS;

	return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
}
static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
{
	struct ceph_osd_request *osd_req;
	int num_ops = count_write_ops(obj_req);
	int which = 0;
	int ret;

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
		num_ops++; /* stat */

	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
		ret = rbd_osd_setup_stat(osd_req, which++);
		if (ret)
			return ret;
	}

	rbd_osd_setup_write_ops(osd_req, which);
	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}
/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}
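/*
 * MODS_ONLY as the byte count means "send only the modification ops, no
 * copyup data" -- used once the copyup itself is no longer needed.
 */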
#define MODS_ONLY	U32_MAX

static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
				      u32 bytes)
{
	struct ceph_osd_request *osd_req;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(bytes > 0 && bytes != MODS_ONLY);

	osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
	if (ret)
		return ret;

	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}
static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
					u32 bytes)
{
	struct ceph_osd_request *osd_req;
	int num_ops = count_write_ops(obj_req);
	int which = 0;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);

	if (bytes != MODS_ONLY)
		num_ops++; /* copyup */

	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	if (bytes != MODS_ONLY) {
		ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
		if (ret)
			return ret;
	}

	rbd_osd_setup_write_ops(osd_req, which);
	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}
static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
/*
 * The target object doesn't exist.  Read the data for the entire
 * target object up to the overlap point (if any) from the parent,
 * so we can use it for a copyup.
 */
static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Re-submit the original write
		 * request -- pass MODS_ONLY since the copyup isn't needed
		 * anymore.
		 */
		return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	return rbd_obj_read_from_parent(obj_req);
}
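/*
 * For a deep-copyup, mark the object as existing in the object map of
 * every snapshot in the current snapshot context before writing it.
 */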
static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_snap_context *snapc = obj_req->img_request->snapc;
	u8 new_state;
	u32 i;
	int ret;

	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return;

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
		return;

	for (i = 0; i < snapc->num_snaps; i++) {
		if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
		    i + 1 < snapc->num_snaps)
			new_state = OBJECT_EXISTS_CLEAN;
		else
			new_state = OBJECT_EXISTS;

		ret = rbd_object_map_update(obj_req, snapc->snaps[i],
					    new_state, NULL);
		if (ret < 0) {
			obj_req->pending.result = ret;
			return;
		}

		rbd_assert(!ret);
		obj_req->pending.num_pending++;
	}
}
static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
{
	u32 bytes = rbd_obj_img_extents_bytes(obj_req);
	int ret;

	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
		bytes = 0;

	if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
		/*
		 * Send a copyup request with an empty snapshot context to
		 * deep-copyup the object through all existing snapshots.
		 * A second request with the current snapshot context will be
		 * sent for the actual modification.
		 */
		ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
		if (ret) {
			obj_req->pending.result = ret;
			return;
		}

		obj_req->pending.num_pending++;
		bytes = MODS_ONLY;
	}

	ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
	if (ret) {
		obj_req->pending.result = ret;
		return;
	}

	obj_req->pending.num_pending++;
}
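/*
 * Copyup state machine: read the overlapping data from the parent, update
 * the snapshot object maps, then write the object (a deep-copyup followed
 * by the actual modification when snapshots exist).
 */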
3234 static bool rbd_obj_advance_copyup(struct rbd_obj_request
*obj_req
, int *result
)
3236 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
3240 switch (obj_req
->copyup_state
) {
3241 case RBD_OBJ_COPYUP_START
:
3242 rbd_assert(!*result
);
3244 ret
= rbd_obj_copyup_read_parent(obj_req
);
3249 if (obj_req
->num_img_extents
)
3250 obj_req
->copyup_state
= RBD_OBJ_COPYUP_READ_PARENT
;
3252 obj_req
->copyup_state
= RBD_OBJ_COPYUP_WRITE_OBJECT
;
3254 case RBD_OBJ_COPYUP_READ_PARENT
:
3258 if (is_zero_bvecs(obj_req
->copyup_bvecs
,
3259 rbd_obj_img_extents_bytes(obj_req
))) {
3260 dout("%s %p detected zeros\n", __func__
, obj_req
);
3261 obj_req
->flags
|= RBD_OBJ_FLAG_COPYUP_ZEROS
;
3264 rbd_obj_copyup_object_maps(obj_req
);
3265 if (!obj_req
->pending
.num_pending
) {
3266 *result
= obj_req
->pending
.result
;
3267 obj_req
->copyup_state
= RBD_OBJ_COPYUP_OBJECT_MAPS
;
3270 obj_req
->copyup_state
= __RBD_OBJ_COPYUP_OBJECT_MAPS
;
3272 case __RBD_OBJ_COPYUP_OBJECT_MAPS
:
3273 if (!pending_result_dec(&obj_req
->pending
, result
))
3276 case RBD_OBJ_COPYUP_OBJECT_MAPS
:
3278 rbd_warn(rbd_dev
, "snap object map update failed: %d",
3283 rbd_obj_copyup_write_object(obj_req
);
3284 if (!obj_req
->pending
.num_pending
) {
3285 *result
= obj_req
->pending
.result
;
3286 obj_req
->copyup_state
= RBD_OBJ_COPYUP_WRITE_OBJECT
;
3289 obj_req
->copyup_state
= __RBD_OBJ_COPYUP_WRITE_OBJECT
;
3291 case __RBD_OBJ_COPYUP_WRITE_OBJECT
:
3292 if (!pending_result_dec(&obj_req
->pending
, result
))
3295 case RBD_OBJ_COPYUP_WRITE_OBJECT
:
3304 * 0 - object map update sent
3305 * 1 - object map update isn't needed
3308 static int rbd_obj_write_post_object_map(struct rbd_obj_request
*obj_req
)
3310 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
3311 u8 current_state
= OBJECT_PENDING
;
3313 if (!(rbd_dev
->header
.features
& RBD_FEATURE_OBJECT_MAP
))
3316 if (!(obj_req
->flags
& RBD_OBJ_FLAG_DELETION
))
3319 return rbd_object_map_update(obj_req
, CEPH_NOSNAP
, OBJECT_NONEXISTENT
,
3323 static bool rbd_obj_advance_write(struct rbd_obj_request
*obj_req
, int *result
)
3325 struct rbd_device
*rbd_dev
= obj_req
->img_request
->rbd_dev
;
3329 switch (obj_req
->write_state
) {
3330 case RBD_OBJ_WRITE_START
:
3331 rbd_assert(!*result
);
3333 if (rbd_obj_write_is_noop(obj_req
))
3336 ret
= rbd_obj_write_pre_object_map(obj_req
);
3341 obj_req
->write_state
= RBD_OBJ_WRITE_PRE_OBJECT_MAP
;
3345 case RBD_OBJ_WRITE_PRE_OBJECT_MAP
:
3347 rbd_warn(rbd_dev
, "pre object map update failed: %d",
3351 ret
= rbd_obj_write_object(obj_req
);
3356 obj_req
->write_state
= RBD_OBJ_WRITE_OBJECT
;
3358 case RBD_OBJ_WRITE_OBJECT
:
3359 if (*result
== -ENOENT
) {
3360 if (obj_req
->flags
& RBD_OBJ_FLAG_COPYUP_ENABLED
) {
3362 obj_req
->copyup_state
= RBD_OBJ_COPYUP_START
;
3363 obj_req
->write_state
= __RBD_OBJ_WRITE_COPYUP
;
3367 * On a non-existent object:
3368 * delete - -ENOENT, truncate/zero - 0
3370 if (obj_req
->flags
& RBD_OBJ_FLAG_DELETION
)
3376 obj_req
->write_state
= RBD_OBJ_WRITE_COPYUP
;
3378 case __RBD_OBJ_WRITE_COPYUP
:
3379 if (!rbd_obj_advance_copyup(obj_req
, result
))
3382 case RBD_OBJ_WRITE_COPYUP
:
3384 rbd_warn(rbd_dev
, "copyup failed: %d", *result
);
3387 ret
= rbd_obj_write_post_object_map(obj_req
);
3392 obj_req
->write_state
= RBD_OBJ_WRITE_POST_OBJECT_MAP
;
3396 case RBD_OBJ_WRITE_POST_OBJECT_MAP
:
3398 rbd_warn(rbd_dev
, "post object map update failed: %d",
3407 * Return true if @obj_req is completed.
3409 static bool __rbd_obj_handle_request(struct rbd_obj_request
*obj_req
,
3412 struct rbd_img_request
*img_req
= obj_req
->img_request
;
3413 struct rbd_device
*rbd_dev
= img_req
->rbd_dev
;
3416 mutex_lock(&obj_req
->state_mutex
);
3417 if (!rbd_img_is_write(img_req
))
3418 done
= rbd_obj_advance_read(obj_req
, result
);
3420 done
= rbd_obj_advance_write(obj_req
, result
);
3421 mutex_unlock(&obj_req
->state_mutex
);
3423 if (done
&& *result
) {
3424 rbd_assert(*result
< 0);
3425 rbd_warn(rbd_dev
, "%s at objno %llu %llu~%llu result %d",
3426 obj_op_name(img_req
->op_type
), obj_req
->ex
.oe_objno
,
3427 obj_req
->ex
.oe_off
, obj_req
->ex
.oe_len
, *result
);
3433 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3436 static void rbd_obj_handle_request(struct rbd_obj_request
*obj_req
, int result
)
3438 if (__rbd_obj_handle_request(obj_req
, &result
))
3439 rbd_img_handle_request(obj_req
->img_request
, result
);
3442 static bool need_exclusive_lock(struct rbd_img_request
*img_req
)
3444 struct rbd_device
*rbd_dev
= img_req
->rbd_dev
;
3446 if (!(rbd_dev
->header
.features
& RBD_FEATURE_EXCLUSIVE_LOCK
))
3449 if (rbd_is_ro(rbd_dev
))
3452 rbd_assert(!test_bit(IMG_REQ_CHILD
, &img_req
->flags
));
3453 if (rbd_dev
->opts
->lock_on_read
||
3454 (rbd_dev
->header
.features
& RBD_FEATURE_OBJECT_MAP
))
3457 return rbd_img_is_write(img_req
);
3460 static bool rbd_lock_add_request(struct rbd_img_request
*img_req
)
3462 struct rbd_device
*rbd_dev
= img_req
->rbd_dev
;
3465 lockdep_assert_held(&rbd_dev
->lock_rwsem
);
3466 locked
= rbd_dev
->lock_state
== RBD_LOCK_STATE_LOCKED
;
3467 spin_lock(&rbd_dev
->lock_lists_lock
);
3468 rbd_assert(list_empty(&img_req
->lock_item
));
3470 list_add_tail(&img_req
->lock_item
, &rbd_dev
->acquiring_list
);
3472 list_add_tail(&img_req
->lock_item
, &rbd_dev
->running_list
);
3473 spin_unlock(&rbd_dev
->lock_lists_lock
);
3477 static void rbd_lock_del_request(struct rbd_img_request
*img_req
)
3479 struct rbd_device
*rbd_dev
= img_req
->rbd_dev
;
3482 lockdep_assert_held(&rbd_dev
->lock_rwsem
);
3483 spin_lock(&rbd_dev
->lock_lists_lock
);
3484 rbd_assert(!list_empty(&img_req
->lock_item
));
3485 list_del_init(&img_req
->lock_item
);
3486 need_wakeup
= (rbd_dev
->lock_state
== RBD_LOCK_STATE_RELEASING
&&
3487 list_empty(&rbd_dev
->running_list
));
3488 spin_unlock(&rbd_dev
->lock_lists_lock
);
3490 complete(&rbd_dev
->releasing_wait
);
3493 static int rbd_img_exclusive_lock(struct rbd_img_request
*img_req
)
3495 struct rbd_device
*rbd_dev
= img_req
->rbd_dev
;
3497 if (!need_exclusive_lock(img_req
))
3500 if (rbd_lock_add_request(img_req
))
3503 if (rbd_dev
->opts
->exclusive
) {
3504 WARN_ON(1); /* lock got released? */
3509 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3510 * and cancel_delayed_work() in wake_lock_waiters().
3512 dout("%s rbd_dev %p queueing lock_dwork\n", __func__
, rbd_dev
);
3513 queue_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->lock_dwork
, 0);
3517 static void rbd_img_object_requests(struct rbd_img_request
*img_req
)
3519 struct rbd_obj_request
*obj_req
;
3521 rbd_assert(!img_req
->pending
.result
&& !img_req
->pending
.num_pending
);
3523 for_each_obj_request(img_req
, obj_req
) {
3526 if (__rbd_obj_handle_request(obj_req
, &result
)) {
3528 img_req
->pending
.result
= result
;
3532 img_req
->pending
.num_pending
++;
3537 static bool rbd_img_advance(struct rbd_img_request
*img_req
, int *result
)
3539 struct rbd_device
*rbd_dev
= img_req
->rbd_dev
;
3543 switch (img_req
->state
) {
3545 rbd_assert(!*result
);
3547 ret
= rbd_img_exclusive_lock(img_req
);
3552 img_req
->state
= RBD_IMG_EXCLUSIVE_LOCK
;
3556 case RBD_IMG_EXCLUSIVE_LOCK
:
3560 rbd_assert(!need_exclusive_lock(img_req
) ||
3561 __rbd_is_lock_owner(rbd_dev
));
3563 rbd_img_object_requests(img_req
);
3564 if (!img_req
->pending
.num_pending
) {
3565 *result
= img_req
->pending
.result
;
3566 img_req
->state
= RBD_IMG_OBJECT_REQUESTS
;
3569 img_req
->state
= __RBD_IMG_OBJECT_REQUESTS
;
3571 case __RBD_IMG_OBJECT_REQUESTS
:
3572 if (!pending_result_dec(&img_req
->pending
, result
))
3575 case RBD_IMG_OBJECT_REQUESTS
:
3583 * Return true if @img_req is completed.
3585 static bool __rbd_img_handle_request(struct rbd_img_request
*img_req
,
3588 struct rbd_device
*rbd_dev
= img_req
->rbd_dev
;
3591 if (need_exclusive_lock(img_req
)) {
3592 down_read(&rbd_dev
->lock_rwsem
);
3593 mutex_lock(&img_req
->state_mutex
);
3594 done
= rbd_img_advance(img_req
, result
);
3596 rbd_lock_del_request(img_req
);
3597 mutex_unlock(&img_req
->state_mutex
);
3598 up_read(&rbd_dev
->lock_rwsem
);
3600 mutex_lock(&img_req
->state_mutex
);
3601 done
= rbd_img_advance(img_req
, result
);
3602 mutex_unlock(&img_req
->state_mutex
);
3605 if (done
&& *result
) {
3606 rbd_assert(*result
< 0);
3607 rbd_warn(rbd_dev
, "%s%s result %d",
3608 test_bit(IMG_REQ_CHILD
, &img_req
->flags
) ? "child " : "",
3609 obj_op_name(img_req
->op_type
), *result
);
3614 static void rbd_img_handle_request(struct rbd_img_request
*img_req
, int result
)
3617 if (!__rbd_img_handle_request(img_req
, &result
))
3620 if (test_bit(IMG_REQ_CHILD
, &img_req
->flags
)) {
3621 struct rbd_obj_request
*obj_req
= img_req
->obj_request
;
3623 rbd_img_request_destroy(img_req
);
3624 if (__rbd_obj_handle_request(obj_req
, &result
)) {
3625 img_req
= obj_req
->img_request
;
3629 struct request
*rq
= blk_mq_rq_from_pdu(img_req
);
3631 rbd_img_request_destroy(img_req
);
3632 blk_mq_end_request(rq
, errno_to_blk_status(result
));
static const struct rbd_client_id rbd_empty_cid;

static bool rbd_cid_equal(const struct rbd_client_id *lhs,
			  const struct rbd_client_id *rhs)
{
	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}

static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
	struct rbd_client_id cid;

	mutex_lock(&rbd_dev->watch_mutex);
	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
	cid.handle = rbd_dev->watch_cookie;
	mutex_unlock(&rbd_dev->watch_mutex);
	return cid;
}
/*
 * lock_rwsem must be held for write
 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
			      const struct rbd_client_id *cid)
{
	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
	     cid->gid, cid->handle);
	rbd_dev->owner_cid = *cid;	/* struct */
}

static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
	mutex_lock(&rbd_dev->watch_mutex);
	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
	mutex_unlock(&rbd_dev->watch_mutex);
}

static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);

	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}
3685 * lock_rwsem must be held for write
3687 static int rbd_lock(struct rbd_device
*rbd_dev
)
3689 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3693 WARN_ON(__rbd_is_lock_owner(rbd_dev
) ||
3694 rbd_dev
->lock_cookie
[0] != '\0');
3696 format_lock_cookie(rbd_dev
, cookie
);
3697 ret
= ceph_cls_lock(osdc
, &rbd_dev
->header_oid
, &rbd_dev
->header_oloc
,
3698 RBD_LOCK_NAME
, CEPH_CLS_LOCK_EXCLUSIVE
, cookie
,
3699 RBD_LOCK_TAG
, "", 0);
3703 __rbd_lock(rbd_dev
, cookie
);
3708 * lock_rwsem must be held for write
3710 static void rbd_unlock(struct rbd_device
*rbd_dev
)
3712 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3715 WARN_ON(!__rbd_is_lock_owner(rbd_dev
) ||
3716 rbd_dev
->lock_cookie
[0] == '\0');
3718 ret
= ceph_cls_unlock(osdc
, &rbd_dev
->header_oid
, &rbd_dev
->header_oloc
,
3719 RBD_LOCK_NAME
, rbd_dev
->lock_cookie
);
3720 if (ret
&& ret
!= -ENOENT
)
3721 rbd_warn(rbd_dev
, "failed to unlock header: %d", ret
);
3723 /* treat errors as the image is unlocked */
3724 rbd_dev
->lock_state
= RBD_LOCK_STATE_UNLOCKED
;
3725 rbd_dev
->lock_cookie
[0] = '\0';
3726 rbd_set_owner_cid(rbd_dev
, &rbd_empty_cid
);
3727 queue_work(rbd_dev
->task_wq
, &rbd_dev
->released_lock_work
);
3730 static int __rbd_notify_op_lock(struct rbd_device
*rbd_dev
,
3731 enum rbd_notify_op notify_op
,
3732 struct page
***preply_pages
,
3735 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3736 struct rbd_client_id cid
= rbd_get_cid(rbd_dev
);
3737 char buf
[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN
];
3738 int buf_size
= sizeof(buf
);
3741 dout("%s rbd_dev %p notify_op %d\n", __func__
, rbd_dev
, notify_op
);
3743 /* encode *LockPayload NotifyMessage (op + ClientId) */
3744 ceph_start_encoding(&p
, 2, 1, buf_size
- CEPH_ENCODING_START_BLK_LEN
);
3745 ceph_encode_32(&p
, notify_op
);
3746 ceph_encode_64(&p
, cid
.gid
);
3747 ceph_encode_64(&p
, cid
.handle
);
3749 return ceph_osdc_notify(osdc
, &rbd_dev
->header_oid
,
3750 &rbd_dev
->header_oloc
, buf
, buf_size
,
3751 RBD_NOTIFY_TIMEOUT
, preply_pages
, preply_len
);
static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
			       enum rbd_notify_op notify_op)
{
	struct page **reply_pages;
	size_t reply_len;

	__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
}

static void rbd_notify_acquired_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  acquired_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}

static void rbd_notify_released_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  released_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}
3780 static int rbd_request_lock(struct rbd_device
*rbd_dev
)
3782 struct page
**reply_pages
;
3784 bool lock_owner_responded
= false;
3787 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3789 ret
= __rbd_notify_op_lock(rbd_dev
, RBD_NOTIFY_OP_REQUEST_LOCK
,
3790 &reply_pages
, &reply_len
);
3791 if (ret
&& ret
!= -ETIMEDOUT
) {
3792 rbd_warn(rbd_dev
, "failed to request lock: %d", ret
);
3796 if (reply_len
> 0 && reply_len
<= PAGE_SIZE
) {
3797 void *p
= page_address(reply_pages
[0]);
3798 void *const end
= p
+ reply_len
;
3801 ceph_decode_32_safe(&p
, end
, n
, e_inval
); /* num_acks */
3806 ceph_decode_need(&p
, end
, 8 + 8, e_inval
);
3807 p
+= 8 + 8; /* skip gid and cookie */
3809 ceph_decode_32_safe(&p
, end
, len
, e_inval
);
3813 if (lock_owner_responded
) {
3815 "duplicate lock owners detected");
3820 lock_owner_responded
= true;
3821 ret
= ceph_start_decoding(&p
, end
, 1, "ResponseMessage",
3825 "failed to decode ResponseMessage: %d",
3830 ret
= ceph_decode_32(&p
);
3834 if (!lock_owner_responded
) {
3835 rbd_warn(rbd_dev
, "no lock owners detected");
3840 ceph_release_page_vector(reply_pages
, calc_pages_for(0, reply_len
));
3849 * Either image request state machine(s) or rbd_add_acquire_lock()
3852 static void wake_lock_waiters(struct rbd_device
*rbd_dev
, int result
)
3854 struct rbd_img_request
*img_req
;
3856 dout("%s rbd_dev %p result %d\n", __func__
, rbd_dev
, result
);
3857 lockdep_assert_held_write(&rbd_dev
->lock_rwsem
);
3859 cancel_delayed_work(&rbd_dev
->lock_dwork
);
3860 if (!completion_done(&rbd_dev
->acquire_wait
)) {
3861 rbd_assert(list_empty(&rbd_dev
->acquiring_list
) &&
3862 list_empty(&rbd_dev
->running_list
));
3863 rbd_dev
->acquire_err
= result
;
3864 complete_all(&rbd_dev
->acquire_wait
);
3868 list_for_each_entry(img_req
, &rbd_dev
->acquiring_list
, lock_item
) {
3869 mutex_lock(&img_req
->state_mutex
);
3870 rbd_assert(img_req
->state
== RBD_IMG_EXCLUSIVE_LOCK
);
3871 rbd_img_schedule(img_req
, result
);
3872 mutex_unlock(&img_req
->state_mutex
);
3875 list_splice_tail_init(&rbd_dev
->acquiring_list
, &rbd_dev
->running_list
);
3878 static int get_lock_owner_info(struct rbd_device
*rbd_dev
,
3879 struct ceph_locker
**lockers
, u32
*num_lockers
)
3881 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3886 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
3888 ret
= ceph_cls_lock_info(osdc
, &rbd_dev
->header_oid
,
3889 &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
3890 &lock_type
, &lock_tag
, lockers
, num_lockers
);
3894 if (*num_lockers
== 0) {
3895 dout("%s rbd_dev %p no lockers detected\n", __func__
, rbd_dev
);
3899 if (strcmp(lock_tag
, RBD_LOCK_TAG
)) {
3900 rbd_warn(rbd_dev
, "locked by external mechanism, tag %s",
3906 if (lock_type
== CEPH_CLS_LOCK_SHARED
) {
3907 rbd_warn(rbd_dev
, "shared lock type detected");
3912 if (strncmp((*lockers
)[0].id
.cookie
, RBD_LOCK_COOKIE_PREFIX
,
3913 strlen(RBD_LOCK_COOKIE_PREFIX
))) {
3914 rbd_warn(rbd_dev
, "locked by external mechanism, cookie %s",
3915 (*lockers
)[0].id
.cookie
);
3925 static int find_watcher(struct rbd_device
*rbd_dev
,
3926 const struct ceph_locker
*locker
)
3928 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
3929 struct ceph_watch_item
*watchers
;
3935 ret
= ceph_osdc_list_watchers(osdc
, &rbd_dev
->header_oid
,
3936 &rbd_dev
->header_oloc
, &watchers
,
3941 sscanf(locker
->id
.cookie
, RBD_LOCK_COOKIE_PREFIX
" %llu", &cookie
);
3942 for (i
= 0; i
< num_watchers
; i
++) {
3943 if (!memcmp(&watchers
[i
].addr
, &locker
->info
.addr
,
3944 sizeof(locker
->info
.addr
)) &&
3945 watchers
[i
].cookie
== cookie
) {
3946 struct rbd_client_id cid
= {
3947 .gid
= le64_to_cpu(watchers
[i
].name
.num
),
3951 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__
,
3952 rbd_dev
, cid
.gid
, cid
.handle
);
3953 rbd_set_owner_cid(rbd_dev
, &cid
);
3959 dout("%s rbd_dev %p no watchers\n", __func__
, rbd_dev
);
3967 * lock_rwsem must be held for write
3969 static int rbd_try_lock(struct rbd_device
*rbd_dev
)
3971 struct ceph_client
*client
= rbd_dev
->rbd_client
->client
;
3972 struct ceph_locker
*lockers
;
3977 ret
= rbd_lock(rbd_dev
);
3981 /* determine if the current lock holder is still alive */
3982 ret
= get_lock_owner_info(rbd_dev
, &lockers
, &num_lockers
);
3986 if (num_lockers
== 0)
3989 ret
= find_watcher(rbd_dev
, lockers
);
3991 goto out
; /* request lock or error */
3993 rbd_warn(rbd_dev
, "breaking header lock owned by %s%llu",
3994 ENTITY_NAME(lockers
[0].id
.name
));
3996 ret
= ceph_monc_blacklist_add(&client
->monc
,
3997 &lockers
[0].info
.addr
);
3999 rbd_warn(rbd_dev
, "blacklist of %s%llu failed: %d",
4000 ENTITY_NAME(lockers
[0].id
.name
), ret
);
4004 ret
= ceph_cls_break_lock(&client
->osdc
, &rbd_dev
->header_oid
,
4005 &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
4006 lockers
[0].id
.cookie
,
4007 &lockers
[0].id
.name
);
4008 if (ret
&& ret
!= -ENOENT
)
4012 ceph_free_lockers(lockers
, num_lockers
);
4016 ceph_free_lockers(lockers
, num_lockers
);
4020 static int rbd_post_acquire_action(struct rbd_device
*rbd_dev
)
4024 if (rbd_dev
->header
.features
& RBD_FEATURE_OBJECT_MAP
) {
4025 ret
= rbd_object_map_open(rbd_dev
);
4036 * 1 - caller should call rbd_request_lock()
4039 static int rbd_try_acquire_lock(struct rbd_device
*rbd_dev
)
4043 down_read(&rbd_dev
->lock_rwsem
);
4044 dout("%s rbd_dev %p read lock_state %d\n", __func__
, rbd_dev
,
4045 rbd_dev
->lock_state
);
4046 if (__rbd_is_lock_owner(rbd_dev
)) {
4047 up_read(&rbd_dev
->lock_rwsem
);
4051 up_read(&rbd_dev
->lock_rwsem
);
4052 down_write(&rbd_dev
->lock_rwsem
);
4053 dout("%s rbd_dev %p write lock_state %d\n", __func__
, rbd_dev
,
4054 rbd_dev
->lock_state
);
4055 if (__rbd_is_lock_owner(rbd_dev
)) {
4056 up_write(&rbd_dev
->lock_rwsem
);
4060 ret
= rbd_try_lock(rbd_dev
);
4062 rbd_warn(rbd_dev
, "failed to lock header: %d", ret
);
4063 if (ret
== -EBLACKLISTED
)
4066 ret
= 1; /* request lock anyway */
4069 up_write(&rbd_dev
->lock_rwsem
);
4073 rbd_assert(rbd_dev
->lock_state
== RBD_LOCK_STATE_LOCKED
);
4074 rbd_assert(list_empty(&rbd_dev
->running_list
));
4076 ret
= rbd_post_acquire_action(rbd_dev
);
4078 rbd_warn(rbd_dev
, "post-acquire action failed: %d", ret
);
4080 * Can't stay in RBD_LOCK_STATE_LOCKED because
4081 * rbd_lock_add_request() would let the request through,
4082 * assuming that e.g. object map is locked and loaded.
4084 rbd_unlock(rbd_dev
);
4088 wake_lock_waiters(rbd_dev
, ret
);
4089 up_write(&rbd_dev
->lock_rwsem
);
4093 static void rbd_acquire_lock(struct work_struct
*work
)
4095 struct rbd_device
*rbd_dev
= container_of(to_delayed_work(work
),
4096 struct rbd_device
, lock_dwork
);
4099 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
4101 ret
= rbd_try_acquire_lock(rbd_dev
);
4103 dout("%s rbd_dev %p ret %d - done\n", __func__
, rbd_dev
, ret
);
4107 ret
= rbd_request_lock(rbd_dev
);
4108 if (ret
== -ETIMEDOUT
) {
4109 goto again
; /* treat this as a dead client */
4110 } else if (ret
== -EROFS
) {
4111 rbd_warn(rbd_dev
, "peer will not release lock");
4112 down_write(&rbd_dev
->lock_rwsem
);
4113 wake_lock_waiters(rbd_dev
, ret
);
4114 up_write(&rbd_dev
->lock_rwsem
);
4115 } else if (ret
< 0) {
4116 rbd_warn(rbd_dev
, "error requesting lock: %d", ret
);
4117 mod_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->lock_dwork
,
4121 * lock owner acked, but resend if we don't see them
4124 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__
,
4126 mod_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->lock_dwork
,
4127 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT
* MSEC_PER_SEC
));
4131 static bool rbd_quiesce_lock(struct rbd_device
*rbd_dev
)
4135 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
4136 lockdep_assert_held_write(&rbd_dev
->lock_rwsem
);
4138 if (rbd_dev
->lock_state
!= RBD_LOCK_STATE_LOCKED
)
4142 * Ensure that all in-flight IO is flushed.
4144 rbd_dev
->lock_state
= RBD_LOCK_STATE_RELEASING
;
4145 rbd_assert(!completion_done(&rbd_dev
->releasing_wait
));
4146 need_wait
= !list_empty(&rbd_dev
->running_list
);
4147 downgrade_write(&rbd_dev
->lock_rwsem
);
4149 wait_for_completion(&rbd_dev
->releasing_wait
);
4150 up_read(&rbd_dev
->lock_rwsem
);
4152 down_write(&rbd_dev
->lock_rwsem
);
4153 if (rbd_dev
->lock_state
!= RBD_LOCK_STATE_RELEASING
)
4156 rbd_assert(list_empty(&rbd_dev
->running_list
));
4160 static void rbd_pre_release_action(struct rbd_device
*rbd_dev
)
4162 if (rbd_dev
->header
.features
& RBD_FEATURE_OBJECT_MAP
)
4163 rbd_object_map_close(rbd_dev
);
4166 static void __rbd_release_lock(struct rbd_device
*rbd_dev
)
4168 rbd_assert(list_empty(&rbd_dev
->running_list
));
4170 rbd_pre_release_action(rbd_dev
);
4171 rbd_unlock(rbd_dev
);
4175 * lock_rwsem must be held for write
4177 static void rbd_release_lock(struct rbd_device
*rbd_dev
)
4179 if (!rbd_quiesce_lock(rbd_dev
))
4182 __rbd_release_lock(rbd_dev
);
4185 * Give others a chance to grab the lock - we would re-acquire
4186 * almost immediately if we got new IO while draining the running
4187 * list otherwise. We need to ack our own notifications, so this
4188 * lock_dwork will be requeued from rbd_handle_released_lock() by
4189 * way of maybe_kick_acquire().
4191 cancel_delayed_work(&rbd_dev
->lock_dwork
);
4194 static void rbd_release_lock_work(struct work_struct
*work
)
4196 struct rbd_device
*rbd_dev
= container_of(work
, struct rbd_device
,
4199 down_write(&rbd_dev
->lock_rwsem
);
4200 rbd_release_lock(rbd_dev
);
4201 up_write(&rbd_dev
->lock_rwsem
);
static void maybe_kick_acquire(struct rbd_device *rbd_dev)
{
	bool have_requests;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	if (__rbd_is_lock_owner(rbd_dev))
		return;

	spin_lock(&rbd_dev->lock_lists_lock);
	have_requests = !list_empty(&rbd_dev->acquiring_list);
	spin_unlock(&rbd_dev->lock_lists_lock);
	if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
		dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	}
}
4221 static void rbd_handle_acquired_lock(struct rbd_device
*rbd_dev
, u8 struct_v
,
4224 struct rbd_client_id cid
= { 0 };
4226 if (struct_v
>= 2) {
4227 cid
.gid
= ceph_decode_64(p
);
4228 cid
.handle
= ceph_decode_64(p
);
4231 dout("%s rbd_dev %p cid %llu-%llu\n", __func__
, rbd_dev
, cid
.gid
,
4233 if (!rbd_cid_equal(&cid
, &rbd_empty_cid
)) {
4234 down_write(&rbd_dev
->lock_rwsem
);
4235 if (rbd_cid_equal(&cid
, &rbd_dev
->owner_cid
)) {
4237 * we already know that the remote client is
4240 up_write(&rbd_dev
->lock_rwsem
);
4244 rbd_set_owner_cid(rbd_dev
, &cid
);
4245 downgrade_write(&rbd_dev
->lock_rwsem
);
4247 down_read(&rbd_dev
->lock_rwsem
);
4250 maybe_kick_acquire(rbd_dev
);
4251 up_read(&rbd_dev
->lock_rwsem
);
4254 static void rbd_handle_released_lock(struct rbd_device
*rbd_dev
, u8 struct_v
,
4257 struct rbd_client_id cid
= { 0 };
4259 if (struct_v
>= 2) {
4260 cid
.gid
= ceph_decode_64(p
);
4261 cid
.handle
= ceph_decode_64(p
);
4264 dout("%s rbd_dev %p cid %llu-%llu\n", __func__
, rbd_dev
, cid
.gid
,
4266 if (!rbd_cid_equal(&cid
, &rbd_empty_cid
)) {
4267 down_write(&rbd_dev
->lock_rwsem
);
4268 if (!rbd_cid_equal(&cid
, &rbd_dev
->owner_cid
)) {
4269 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
4270 __func__
, rbd_dev
, cid
.gid
, cid
.handle
,
4271 rbd_dev
->owner_cid
.gid
, rbd_dev
->owner_cid
.handle
);
4272 up_write(&rbd_dev
->lock_rwsem
);
4276 rbd_set_owner_cid(rbd_dev
, &rbd_empty_cid
);
4277 downgrade_write(&rbd_dev
->lock_rwsem
);
4279 down_read(&rbd_dev
->lock_rwsem
);
4282 maybe_kick_acquire(rbd_dev
);
4283 up_read(&rbd_dev
->lock_rwsem
);
4287 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4288 * ResponseMessage is needed.
4290 static int rbd_handle_request_lock(struct rbd_device
*rbd_dev
, u8 struct_v
,
4293 struct rbd_client_id my_cid
= rbd_get_cid(rbd_dev
);
4294 struct rbd_client_id cid
= { 0 };
4297 if (struct_v
>= 2) {
4298 cid
.gid
= ceph_decode_64(p
);
4299 cid
.handle
= ceph_decode_64(p
);
4302 dout("%s rbd_dev %p cid %llu-%llu\n", __func__
, rbd_dev
, cid
.gid
,
4304 if (rbd_cid_equal(&cid
, &my_cid
))
4307 down_read(&rbd_dev
->lock_rwsem
);
4308 if (__rbd_is_lock_owner(rbd_dev
)) {
4309 if (rbd_dev
->lock_state
== RBD_LOCK_STATE_LOCKED
&&
4310 rbd_cid_equal(&rbd_dev
->owner_cid
, &rbd_empty_cid
))
4314 * encode ResponseMessage(0) so the peer can detect
4319 if (rbd_dev
->lock_state
== RBD_LOCK_STATE_LOCKED
) {
4320 if (!rbd_dev
->opts
->exclusive
) {
4321 dout("%s rbd_dev %p queueing unlock_work\n",
4323 queue_work(rbd_dev
->task_wq
,
4324 &rbd_dev
->unlock_work
);
4326 /* refuse to release the lock */
4333 up_read(&rbd_dev
->lock_rwsem
);
4337 static void __rbd_acknowledge_notify(struct rbd_device
*rbd_dev
,
4338 u64 notify_id
, u64 cookie
, s32
*result
)
4340 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
4341 char buf
[4 + CEPH_ENCODING_START_BLK_LEN
];
4342 int buf_size
= sizeof(buf
);
4348 /* encode ResponseMessage */
4349 ceph_start_encoding(&p
, 1, 1,
4350 buf_size
- CEPH_ENCODING_START_BLK_LEN
);
4351 ceph_encode_32(&p
, *result
);
4356 ret
= ceph_osdc_notify_ack(osdc
, &rbd_dev
->header_oid
,
4357 &rbd_dev
->header_oloc
, notify_id
, cookie
,
4360 rbd_warn(rbd_dev
, "acknowledge_notify failed: %d", ret
);
static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
				   u64 cookie)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
}

static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
					  u64 notify_id, u64 cookie, s32 result)
{
	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
}
4377 static void rbd_watch_cb(void *arg
, u64 notify_id
, u64 cookie
,
4378 u64 notifier_id
, void *data
, size_t data_len
)
4380 struct rbd_device
*rbd_dev
= arg
;
4382 void *const end
= p
+ data_len
;
4388 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4389 __func__
, rbd_dev
, cookie
, notify_id
, data_len
);
4391 ret
= ceph_start_decoding(&p
, end
, 1, "NotifyMessage",
4394 rbd_warn(rbd_dev
, "failed to decode NotifyMessage: %d",
4399 notify_op
= ceph_decode_32(&p
);
4401 /* legacy notification for header updates */
4402 notify_op
= RBD_NOTIFY_OP_HEADER_UPDATE
;
4406 dout("%s rbd_dev %p notify_op %u\n", __func__
, rbd_dev
, notify_op
);
4407 switch (notify_op
) {
4408 case RBD_NOTIFY_OP_ACQUIRED_LOCK
:
4409 rbd_handle_acquired_lock(rbd_dev
, struct_v
, &p
);
4410 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
4412 case RBD_NOTIFY_OP_RELEASED_LOCK
:
4413 rbd_handle_released_lock(rbd_dev
, struct_v
, &p
);
4414 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
4416 case RBD_NOTIFY_OP_REQUEST_LOCK
:
4417 ret
= rbd_handle_request_lock(rbd_dev
, struct_v
, &p
);
4419 rbd_acknowledge_notify_result(rbd_dev
, notify_id
,
4422 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
4424 case RBD_NOTIFY_OP_HEADER_UPDATE
:
4425 ret
= rbd_dev_refresh(rbd_dev
);
4427 rbd_warn(rbd_dev
, "refresh failed: %d", ret
);
4429 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
4432 if (rbd_is_lock_owner(rbd_dev
))
4433 rbd_acknowledge_notify_result(rbd_dev
, notify_id
,
4434 cookie
, -EOPNOTSUPP
);
4436 rbd_acknowledge_notify(rbd_dev
, notify_id
, cookie
);
4441 static void __rbd_unregister_watch(struct rbd_device
*rbd_dev
);
4443 static void rbd_watch_errcb(void *arg
, u64 cookie
, int err
)
4445 struct rbd_device
*rbd_dev
= arg
;
4447 rbd_warn(rbd_dev
, "encountered watch error: %d", err
);
4449 down_write(&rbd_dev
->lock_rwsem
);
4450 rbd_set_owner_cid(rbd_dev
, &rbd_empty_cid
);
4451 up_write(&rbd_dev
->lock_rwsem
);
4453 mutex_lock(&rbd_dev
->watch_mutex
);
4454 if (rbd_dev
->watch_state
== RBD_WATCH_STATE_REGISTERED
) {
4455 __rbd_unregister_watch(rbd_dev
);
4456 rbd_dev
->watch_state
= RBD_WATCH_STATE_ERROR
;
4458 queue_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->watch_dwork
, 0);
4460 mutex_unlock(&rbd_dev
->watch_mutex
);
4464 * watch_mutex must be locked
4466 static int __rbd_register_watch(struct rbd_device
*rbd_dev
)
4468 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
4469 struct ceph_osd_linger_request
*handle
;
4471 rbd_assert(!rbd_dev
->watch_handle
);
4472 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
4474 handle
= ceph_osdc_watch(osdc
, &rbd_dev
->header_oid
,
4475 &rbd_dev
->header_oloc
, rbd_watch_cb
,
4476 rbd_watch_errcb
, rbd_dev
);
4478 return PTR_ERR(handle
);
4480 rbd_dev
->watch_handle
= handle
;
4485 * watch_mutex must be locked
4487 static void __rbd_unregister_watch(struct rbd_device
*rbd_dev
)
4489 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
4492 rbd_assert(rbd_dev
->watch_handle
);
4493 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
4495 ret
= ceph_osdc_unwatch(osdc
, rbd_dev
->watch_handle
);
4497 rbd_warn(rbd_dev
, "failed to unwatch: %d", ret
);
4499 rbd_dev
->watch_handle
= NULL
;
4502 static int rbd_register_watch(struct rbd_device
*rbd_dev
)
4506 mutex_lock(&rbd_dev
->watch_mutex
);
4507 rbd_assert(rbd_dev
->watch_state
== RBD_WATCH_STATE_UNREGISTERED
);
4508 ret
= __rbd_register_watch(rbd_dev
);
4512 rbd_dev
->watch_state
= RBD_WATCH_STATE_REGISTERED
;
4513 rbd_dev
->watch_cookie
= rbd_dev
->watch_handle
->linger_id
;
4516 mutex_unlock(&rbd_dev
->watch_mutex
);
static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	cancel_work_sync(&rbd_dev->acquired_lock_work);
	cancel_work_sync(&rbd_dev->released_lock_work);
	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
	cancel_work_sync(&rbd_dev->unlock_work);
}

static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	cancel_tasks_sync(rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
		__rbd_unregister_watch(rbd_dev);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	mutex_unlock(&rbd_dev->watch_mutex);

	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
4545 * lock_rwsem must be held for write
4547 static void rbd_reacquire_lock(struct rbd_device
*rbd_dev
)
4549 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
4553 if (!rbd_quiesce_lock(rbd_dev
))
4556 format_lock_cookie(rbd_dev
, cookie
);
4557 ret
= ceph_cls_set_cookie(osdc
, &rbd_dev
->header_oid
,
4558 &rbd_dev
->header_oloc
, RBD_LOCK_NAME
,
4559 CEPH_CLS_LOCK_EXCLUSIVE
, rbd_dev
->lock_cookie
,
4560 RBD_LOCK_TAG
, cookie
);
4562 if (ret
!= -EOPNOTSUPP
)
4563 rbd_warn(rbd_dev
, "failed to update lock cookie: %d",
4567 * Lock cookie cannot be updated on older OSDs, so do
4568 * a manual release and queue an acquire.
4570 __rbd_release_lock(rbd_dev
);
4571 queue_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->lock_dwork
, 0);
4573 __rbd_lock(rbd_dev
, cookie
);
4574 wake_lock_waiters(rbd_dev
, 0);
4578 static void rbd_reregister_watch(struct work_struct
*work
)
4580 struct rbd_device
*rbd_dev
= container_of(to_delayed_work(work
),
4581 struct rbd_device
, watch_dwork
);
4584 dout("%s rbd_dev %p\n", __func__
, rbd_dev
);
4586 mutex_lock(&rbd_dev
->watch_mutex
);
4587 if (rbd_dev
->watch_state
!= RBD_WATCH_STATE_ERROR
) {
4588 mutex_unlock(&rbd_dev
->watch_mutex
);
4592 ret
= __rbd_register_watch(rbd_dev
);
4594 rbd_warn(rbd_dev
, "failed to reregister watch: %d", ret
);
4595 if (ret
!= -EBLACKLISTED
&& ret
!= -ENOENT
) {
4596 queue_delayed_work(rbd_dev
->task_wq
,
4597 &rbd_dev
->watch_dwork
,
4599 mutex_unlock(&rbd_dev
->watch_mutex
);
4603 mutex_unlock(&rbd_dev
->watch_mutex
);
4604 down_write(&rbd_dev
->lock_rwsem
);
4605 wake_lock_waiters(rbd_dev
, ret
);
4606 up_write(&rbd_dev
->lock_rwsem
);
4610 rbd_dev
->watch_state
= RBD_WATCH_STATE_REGISTERED
;
4611 rbd_dev
->watch_cookie
= rbd_dev
->watch_handle
->linger_id
;
4612 mutex_unlock(&rbd_dev
->watch_mutex
);
4614 down_write(&rbd_dev
->lock_rwsem
);
4615 if (rbd_dev
->lock_state
== RBD_LOCK_STATE_LOCKED
)
4616 rbd_reacquire_lock(rbd_dev
);
4617 up_write(&rbd_dev
->lock_rwsem
);
4619 ret
= rbd_dev_refresh(rbd_dev
);
4621 rbd_warn(rbd_dev
, "reregistration refresh failed: %d", ret
);
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
                               struct ceph_object_id *oid,
                               struct ceph_object_locator *oloc,
                               const char *method_name,
                               const void *outbound,
                               size_t outbound_size,
                               void *inbound,
                               size_t inbound_size)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct page *req_page = NULL;
        struct page *reply_page;
        int ret;

        /*
         * Method calls are ultimately read operations.  The result
         * should be placed into the inbound buffer provided.  They
         * also supply outbound data--parameters for the object
         * method.  Currently if this is present it will be a
         * snapshot id.
         */
        if (outbound) {
                if (outbound_size > PAGE_SIZE)
                        return -E2BIG;

                req_page = alloc_page(GFP_KERNEL);
                if (!req_page)
                        return -ENOMEM;

                memcpy(page_address(req_page), outbound, outbound_size);
        }

        reply_page = alloc_page(GFP_KERNEL);
        if (!reply_page) {
                if (req_page)
                        __free_page(req_page);
                return -ENOMEM;
        }

        ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
                             CEPH_OSD_FLAG_READ, req_page, outbound_size,
                             &reply_page, &inbound_size);
        if (!ret) {
                memcpy(inbound, page_address(reply_page), inbound_size);
                ret = inbound_size;
        }

        if (req_page)
                __free_page(req_page);
        __free_page(reply_page);
        return ret;
}
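/*
 * Both the outbound parameters and the reply are confined to a single
 * page here; the callers in this file only pass small fixed-size
 * buffers (snapshot ids, size/feature structs, short strings), so one
 * page in each direction is enough.
 */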
static void rbd_queue_workfn(struct work_struct *work)
{
        struct rbd_img_request *img_request =
            container_of(work, struct rbd_img_request, work);
        struct rbd_device *rbd_dev = img_request->rbd_dev;
        enum obj_operation_type op_type = img_request->op_type;
        struct request *rq = blk_mq_rq_from_pdu(img_request);
        u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
        u64 length = blk_rq_bytes(rq);
        u64 mapping_size;
        int result;

        /* Ignore/skip any zero-length requests */
        if (!length) {
                dout("%s: zero-length request\n", __func__);
                result = 0;
                goto err_img_request;
        }

        blk_mq_start_request(rq);

        down_read(&rbd_dev->header_rwsem);
        mapping_size = rbd_dev->mapping.size;
        rbd_img_capture_header(img_request);
        up_read(&rbd_dev->header_rwsem);

        if (offset + length > mapping_size) {
                rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
                         length, mapping_size);
                result = -EIO;
                goto err_img_request;
        }

        dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
             img_request, obj_op_name(op_type), offset, length);

        if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
                result = rbd_img_fill_nodata(img_request, offset, length);
        else
                result = rbd_img_fill_from_bio(img_request, offset, length,
                                               rq->bio);
        if (result)
                goto err_img_request;

        rbd_img_handle_request(img_request, 0);
        return;

err_img_request:
        rbd_img_request_destroy(img_request);
        if (result)
                rbd_warn(rbd_dev, "%s %llx at %llx result %d",
                         obj_op_name(op_type), length, offset, result);
        blk_mq_end_request(rq, errno_to_blk_status(result));
}
static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
{
        struct rbd_device *rbd_dev = hctx->queue->queuedata;
        struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
        enum obj_operation_type op_type;

        switch (req_op(bd->rq)) {
        case REQ_OP_DISCARD:
                op_type = OBJ_OP_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                op_type = OBJ_OP_ZEROOUT;
                break;
        case REQ_OP_WRITE:
                op_type = OBJ_OP_WRITE;
                break;
        case REQ_OP_READ:
                op_type = OBJ_OP_READ;
                break;
        default:
                rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
                return BLK_STS_IOERR;
        }

        rbd_img_request_init(img_req, rbd_dev, op_type);

        if (rbd_img_is_write(img_req)) {
                if (rbd_is_ro(rbd_dev)) {
                        rbd_warn(rbd_dev, "%s on read-only mapping",
                                 obj_op_name(img_req->op_type));
                        return BLK_STS_IOERR;
                }
                rbd_assert(!rbd_is_snap(rbd_dev));
        }

        INIT_WORK(&img_req->work, rbd_queue_workfn);
        queue_work(rbd_wq, &img_req->work);

        return BLK_STS_OK;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
        blk_cleanup_queue(rbd_dev->disk->queue);
        blk_mq_free_tag_set(&rbd_dev->tag_set);
        put_disk(rbd_dev->disk);
        rbd_dev->disk = NULL;
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
                             struct ceph_object_id *oid,
                             struct ceph_object_locator *oloc,
                             void *buf, int buf_len)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct ceph_osd_request *req;
        struct page **pages;
        int num_pages = calc_pages_for(0, buf_len);
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ceph_oid_copy(&req->r_base_oid, oid);
        ceph_oloc_copy(&req->r_base_oloc, oloc);
        req->r_flags = CEPH_OSD_FLAG_READ;

        pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_req;
        }

        osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
        osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
                                         true);

        ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
        if (ret)
                goto out_req;

        ceph_osdc_start_request(osdc, req, false);
        ret = ceph_osdc_wait_request(osdc, req);
        if (ret >= 0)
                ceph_copy_from_page_vector(pages, buf, 0, ret);

out_req:
        ceph_osdc_put_request(req);
        return ret;
}
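/*
 * This raw synchronous read is only needed for format 1 images, whose
 * header is read as a plain object below in rbd_dev_v1_header_info();
 * format 2 metadata is fetched via class method calls instead.
 */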
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
        struct rbd_image_header_ondisk *ondisk = NULL;
        u32 snap_count = 0;
        u64 names_size = 0;
        u32 want_count;
        int ret;

        /*
         * The complete header will include an array of its 64-bit
         * snapshot ids, followed by the names of those snapshots as
         * a contiguous block of NUL-terminated strings.  Note that
         * the number of snapshots could change by the time we read
         * it in, in which case we re-read it.
         */
        do {
                size_t size;

                kfree(ondisk);

                size = sizeof (*ondisk);
                size += snap_count * sizeof (struct rbd_image_snap_ondisk);
                size += names_size;
                ondisk = kmalloc(size, GFP_KERNEL);
                if (!ondisk)
                        return -ENOMEM;

                ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
                                        &rbd_dev->header_oloc, ondisk, size);
                if (ret < 0)
                        goto out;
                if ((size_t)ret < size) {
                        ret = -ENXIO;
                        rbd_warn(rbd_dev, "short header read (want %zd got %d)",
                                 size, ret);
                        goto out;
                }
                if (!rbd_dev_ondisk_valid(ondisk)) {
                        ret = -ENXIO;
                        rbd_warn(rbd_dev, "invalid header");
                        goto out;
                }

                names_size = le64_to_cpu(ondisk->snap_names_len);
                want_count = snap_count;
                snap_count = le32_to_cpu(ondisk->snap_count);
        } while (snap_count != want_count);

        ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
        kfree(ondisk);

        return ret;
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
        sector_t size;

        /*
         * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
         * try to update its size.  If REMOVING is set, updating size
         * is just useless work since the device can't be opened.
         */
        if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
            !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
                size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
                dout("setting size to %llu sectors", (unsigned long long)size);
                set_capacity(rbd_dev->disk, size);
                revalidate_disk(rbd_dev->disk);
        }
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
        u64 mapping_size;
        int ret;

        down_write(&rbd_dev->header_rwsem);
        mapping_size = rbd_dev->mapping.size;

        ret = rbd_dev_header_info(rbd_dev);
        if (ret)
                goto out;

        /*
         * If there is a parent, see if it has disappeared due to the
         * mapped image getting flattened.
         */
        if (rbd_dev->parent) {
                ret = rbd_dev_v2_parent_info(rbd_dev);
                if (ret)
                        goto out;
        }

        rbd_assert(!rbd_is_snap(rbd_dev));
        rbd_dev->mapping.size = rbd_dev->header.image_size;

out:
        up_write(&rbd_dev->header_rwsem);
        if (!ret && mapping_size != rbd_dev->mapping.size)
                rbd_dev_update_size(rbd_dev);

        return ret;
}
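/*
 * Note that rbd_dev_update_size() runs only after header_rwsem has
 * been dropped, and only when the mapping size actually changed.
 */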
static const struct blk_mq_ops rbd_mq_ops = {
        .queue_rq       = rbd_queue_rq,
};
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
        struct gendisk *disk;
        struct request_queue *q;
        unsigned int objset_bytes =
            rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
        int err;

        /* create gendisk info */
        disk = alloc_disk(single_major ?
                          (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
                          RBD_MINORS_PER_MAJOR);
        if (!disk)
                return -ENOMEM;

        snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
                 rbd_dev->dev_id);
        disk->major = rbd_dev->major;
        disk->first_minor = rbd_dev->minor;
        if (single_major)
                disk->flags |= GENHD_FL_EXT_DEVT;
        disk->fops = &rbd_bd_ops;
        disk->private_data = rbd_dev;

        memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
        rbd_dev->tag_set.ops = &rbd_mq_ops;
        rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
        rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
        rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
        rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);

        err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
        if (err)
                goto out_disk;

        q = blk_mq_init_queue(&rbd_dev->tag_set);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }

        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

        blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
        q->limits.max_sectors = queue_max_hw_sectors(q);
        blk_queue_max_segments(q, USHRT_MAX);
        blk_queue_max_segment_size(q, UINT_MAX);
        blk_queue_io_min(q, rbd_dev->opts->alloc_size);
        blk_queue_io_opt(q, rbd_dev->opts->alloc_size);

        if (rbd_dev->opts->trim) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
                q->limits.discard_granularity = rbd_dev->opts->alloc_size;
                blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
                blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
        }

        if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
                q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

        /*
         * disk_release() expects a queue ref from add_disk() and will
         * put it.  Hold an extra ref until add_disk() is called.
         */
        WARN_ON(!blk_get_queue(q));
        disk->queue = q;
        q->queuedata = rbd_dev;

        rbd_dev->disk = disk;

        return 0;

out_tag_set:
        blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
        put_disk(disk);
        return err;
}
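/*
 * The queue limits above are all derived from the object set size
 * (object_size * stripe_count), so a single request is never larger
 * than one object set; discard and write-zeroes are capped at the
 * same objset_bytes when trimming is enabled.
 */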
5028 static struct rbd_device
*dev_to_rbd_dev(struct device
*dev
)
5030 return container_of(dev
, struct rbd_device
, dev
);
5033 static ssize_t
rbd_size_show(struct device
*dev
,
5034 struct device_attribute
*attr
, char *buf
)
5036 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5038 return sprintf(buf
, "%llu\n",
5039 (unsigned long long)rbd_dev
->mapping
.size
);
5042 static ssize_t
rbd_features_show(struct device
*dev
,
5043 struct device_attribute
*attr
, char *buf
)
5045 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5047 return sprintf(buf
, "0x%016llx\n", rbd_dev
->header
.features
);
5050 static ssize_t
rbd_major_show(struct device
*dev
,
5051 struct device_attribute
*attr
, char *buf
)
5053 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5056 return sprintf(buf
, "%d\n", rbd_dev
->major
);
5058 return sprintf(buf
, "(none)\n");
5061 static ssize_t
rbd_minor_show(struct device
*dev
,
5062 struct device_attribute
*attr
, char *buf
)
5064 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5066 return sprintf(buf
, "%d\n", rbd_dev
->minor
);
5069 static ssize_t
rbd_client_addr_show(struct device
*dev
,
5070 struct device_attribute
*attr
, char *buf
)
5072 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5073 struct ceph_entity_addr
*client_addr
=
5074 ceph_client_addr(rbd_dev
->rbd_client
->client
);
5076 return sprintf(buf
, "%pISpc/%u\n", &client_addr
->in_addr
,
5077 le32_to_cpu(client_addr
->nonce
));
5080 static ssize_t
rbd_client_id_show(struct device
*dev
,
5081 struct device_attribute
*attr
, char *buf
)
5083 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5085 return sprintf(buf
, "client%lld\n",
5086 ceph_client_gid(rbd_dev
->rbd_client
->client
));
5089 static ssize_t
rbd_cluster_fsid_show(struct device
*dev
,
5090 struct device_attribute
*attr
, char *buf
)
5092 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5094 return sprintf(buf
, "%pU\n", &rbd_dev
->rbd_client
->client
->fsid
);
5097 static ssize_t
rbd_config_info_show(struct device
*dev
,
5098 struct device_attribute
*attr
, char *buf
)
5100 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5102 return sprintf(buf
, "%s\n", rbd_dev
->config_info
);
5105 static ssize_t
rbd_pool_show(struct device
*dev
,
5106 struct device_attribute
*attr
, char *buf
)
5108 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5110 return sprintf(buf
, "%s\n", rbd_dev
->spec
->pool_name
);
5113 static ssize_t
rbd_pool_id_show(struct device
*dev
,
5114 struct device_attribute
*attr
, char *buf
)
5116 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5118 return sprintf(buf
, "%llu\n",
5119 (unsigned long long) rbd_dev
->spec
->pool_id
);
5122 static ssize_t
rbd_pool_ns_show(struct device
*dev
,
5123 struct device_attribute
*attr
, char *buf
)
5125 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5127 return sprintf(buf
, "%s\n", rbd_dev
->spec
->pool_ns
?: "");
5130 static ssize_t
rbd_name_show(struct device
*dev
,
5131 struct device_attribute
*attr
, char *buf
)
5133 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5135 if (rbd_dev
->spec
->image_name
)
5136 return sprintf(buf
, "%s\n", rbd_dev
->spec
->image_name
);
5138 return sprintf(buf
, "(unknown)\n");
5141 static ssize_t
rbd_image_id_show(struct device
*dev
,
5142 struct device_attribute
*attr
, char *buf
)
5144 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5146 return sprintf(buf
, "%s\n", rbd_dev
->spec
->image_id
);
5150 * Shows the name of the currently-mapped snapshot (or
5151 * RBD_SNAP_HEAD_NAME for the base image).
5153 static ssize_t
rbd_snap_show(struct device
*dev
,
5154 struct device_attribute
*attr
,
5157 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5159 return sprintf(buf
, "%s\n", rbd_dev
->spec
->snap_name
);
5162 static ssize_t
rbd_snap_id_show(struct device
*dev
,
5163 struct device_attribute
*attr
, char *buf
)
5165 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5167 return sprintf(buf
, "%llu\n", rbd_dev
->spec
->snap_id
);
5171 * For a v2 image, shows the chain of parent images, separated by empty
5172 * lines. For v1 images or if there is no parent, shows "(no parent
5175 static ssize_t
rbd_parent_show(struct device
*dev
,
5176 struct device_attribute
*attr
,
5179 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5182 if (!rbd_dev
->parent
)
5183 return sprintf(buf
, "(no parent image)\n");
5185 for ( ; rbd_dev
->parent
; rbd_dev
= rbd_dev
->parent
) {
5186 struct rbd_spec
*spec
= rbd_dev
->parent_spec
;
5188 count
+= sprintf(&buf
[count
], "%s"
5189 "pool_id %llu\npool_name %s\n"
5191 "image_id %s\nimage_name %s\n"
5192 "snap_id %llu\nsnap_name %s\n"
5194 !count
? "" : "\n", /* first? */
5195 spec
->pool_id
, spec
->pool_name
,
5196 spec
->pool_ns
?: "",
5197 spec
->image_id
, spec
->image_name
?: "(unknown)",
5198 spec
->snap_id
, spec
->snap_name
,
5199 rbd_dev
->parent_overlap
);
5205 static ssize_t
rbd_image_refresh(struct device
*dev
,
5206 struct device_attribute
*attr
,
5210 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
5213 ret
= rbd_dev_refresh(rbd_dev
);
static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
        &dev_attr_size.attr,
        &dev_attr_features.attr,
        &dev_attr_major.attr,
        &dev_attr_minor.attr,
        &dev_attr_client_addr.attr,
        &dev_attr_client_id.attr,
        &dev_attr_cluster_fsid.attr,
        &dev_attr_config_info.attr,
        &dev_attr_pool.attr,
        &dev_attr_pool_id.attr,
        &dev_attr_pool_ns.attr,
        &dev_attr_name.attr,
        &dev_attr_image_id.attr,
        &dev_attr_current_snap.attr,
        &dev_attr_snap_id.attr,
        &dev_attr_parent.attr,
        &dev_attr_refresh.attr,
        NULL
};

static struct attribute_group rbd_attr_group = {
        .attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
        &rbd_attr_group,
        NULL
};

static void rbd_dev_release(struct device *dev);

static const struct device_type rbd_device_type = {
        .name           = "rbd",
        .groups         = rbd_attr_groups,
        .release        = rbd_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
        kref_get(&spec->kref);

        return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
        if (spec)
                kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
        struct rbd_spec *spec;

        spec = kzalloc(sizeof (*spec), GFP_KERNEL);
        if (!spec)
                return NULL;

        spec->pool_id = CEPH_NOPOOL;
        spec->snap_id = CEPH_NOSNAP;
        kref_init(&spec->kref);

        return spec;
}

static void rbd_spec_free(struct kref *kref)
{
        struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

        kfree(spec->pool_name);
        kfree(spec->pool_ns);
        kfree(spec->image_id);
        kfree(spec->image_name);
        kfree(spec->snap_name);
        kfree(spec);
}
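/*
 * An rbd_spec is shared by reference: rbd_spec_get()/rbd_spec_put()
 * manage the kref, and rbd_spec_free() releases every dynamically
 * allocated name string along with the spec itself.
 */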
static void rbd_dev_free(struct rbd_device *rbd_dev)
{
        WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
        WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);

        ceph_oid_destroy(&rbd_dev->header_oid);
        ceph_oloc_destroy(&rbd_dev->header_oloc);
        kfree(rbd_dev->config_info);

        rbd_put_client(rbd_dev->rbd_client);
        rbd_spec_put(rbd_dev->spec);
        kfree(rbd_dev->opts);
        kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
        bool need_put = !!rbd_dev->opts;

        if (need_put) {
                destroy_workqueue(rbd_dev->task_wq);
                ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
        }

        rbd_dev_free(rbd_dev);

        /*
         * This is racy, but way better than putting module outside of
         * the release callback.  The race window is pretty small, so
         * doing something similar to dm (dm-builtin.c) is overkill.
         */
        if (need_put)
                module_put(THIS_MODULE);
}
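/*
 * need_put distinguishes devices created via rbd_dev_create() (which
 * take a module reference and allocate a dev_id and task workqueue)
 * from bare parent devices made by __rbd_dev_create(), which own
 * neither.
 */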
5353 static struct rbd_device
*__rbd_dev_create(struct rbd_client
*rbdc
,
5354 struct rbd_spec
*spec
)
5356 struct rbd_device
*rbd_dev
;
5358 rbd_dev
= kzalloc(sizeof(*rbd_dev
), GFP_KERNEL
);
5362 spin_lock_init(&rbd_dev
->lock
);
5363 INIT_LIST_HEAD(&rbd_dev
->node
);
5364 init_rwsem(&rbd_dev
->header_rwsem
);
5366 rbd_dev
->header
.data_pool_id
= CEPH_NOPOOL
;
5367 ceph_oid_init(&rbd_dev
->header_oid
);
5368 rbd_dev
->header_oloc
.pool
= spec
->pool_id
;
5369 if (spec
->pool_ns
) {
5370 WARN_ON(!*spec
->pool_ns
);
5371 rbd_dev
->header_oloc
.pool_ns
=
5372 ceph_find_or_create_string(spec
->pool_ns
,
5373 strlen(spec
->pool_ns
));
5376 mutex_init(&rbd_dev
->watch_mutex
);
5377 rbd_dev
->watch_state
= RBD_WATCH_STATE_UNREGISTERED
;
5378 INIT_DELAYED_WORK(&rbd_dev
->watch_dwork
, rbd_reregister_watch
);
5380 init_rwsem(&rbd_dev
->lock_rwsem
);
5381 rbd_dev
->lock_state
= RBD_LOCK_STATE_UNLOCKED
;
5382 INIT_WORK(&rbd_dev
->acquired_lock_work
, rbd_notify_acquired_lock
);
5383 INIT_WORK(&rbd_dev
->released_lock_work
, rbd_notify_released_lock
);
5384 INIT_DELAYED_WORK(&rbd_dev
->lock_dwork
, rbd_acquire_lock
);
5385 INIT_WORK(&rbd_dev
->unlock_work
, rbd_release_lock_work
);
5386 spin_lock_init(&rbd_dev
->lock_lists_lock
);
5387 INIT_LIST_HEAD(&rbd_dev
->acquiring_list
);
5388 INIT_LIST_HEAD(&rbd_dev
->running_list
);
5389 init_completion(&rbd_dev
->acquire_wait
);
5390 init_completion(&rbd_dev
->releasing_wait
);
5392 spin_lock_init(&rbd_dev
->object_map_lock
);
5394 rbd_dev
->dev
.bus
= &rbd_bus_type
;
5395 rbd_dev
->dev
.type
= &rbd_device_type
;
5396 rbd_dev
->dev
.parent
= &rbd_root_dev
;
5397 device_initialize(&rbd_dev
->dev
);
5399 rbd_dev
->rbd_client
= rbdc
;
5400 rbd_dev
->spec
= spec
;
5406 * Create a mapping rbd_dev.
5408 static struct rbd_device
*rbd_dev_create(struct rbd_client
*rbdc
,
5409 struct rbd_spec
*spec
,
5410 struct rbd_options
*opts
)
5412 struct rbd_device
*rbd_dev
;
5414 rbd_dev
= __rbd_dev_create(rbdc
, spec
);
5418 rbd_dev
->opts
= opts
;
5420 /* get an id and fill in device name */
5421 rbd_dev
->dev_id
= ida_simple_get(&rbd_dev_id_ida
, 0,
5422 minor_to_rbd_dev_id(1 << MINORBITS
),
5424 if (rbd_dev
->dev_id
< 0)
5427 sprintf(rbd_dev
->name
, RBD_DRV_NAME
"%d", rbd_dev
->dev_id
);
5428 rbd_dev
->task_wq
= alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM
,
5430 if (!rbd_dev
->task_wq
)
5433 /* we have a ref from do_rbd_add() */
5434 __module_get(THIS_MODULE
);
5436 dout("%s rbd_dev %p dev_id %d\n", __func__
, rbd_dev
, rbd_dev
->dev_id
);
5440 ida_simple_remove(&rbd_dev_id_ida
, rbd_dev
->dev_id
);
5442 rbd_dev_free(rbd_dev
);
5446 static void rbd_dev_destroy(struct rbd_device
*rbd_dev
)
5449 put_device(&rbd_dev
->dev
);
5453 * Get the size and object order for an image snapshot, or if
5454 * snap_id is CEPH_NOSNAP, gets this information for the base
5457 static int _rbd_dev_v2_snap_size(struct rbd_device
*rbd_dev
, u64 snap_id
,
5458 u8
*order
, u64
*snap_size
)
5460 __le64 snapid
= cpu_to_le64(snap_id
);
5465 } __attribute__ ((packed
)) size_buf
= { 0 };
5467 ret
= rbd_obj_method_sync(rbd_dev
, &rbd_dev
->header_oid
,
5468 &rbd_dev
->header_oloc
, "get_size",
5469 &snapid
, sizeof(snapid
),
5470 &size_buf
, sizeof(size_buf
));
5471 dout("%s: rbd_obj_method_sync returned %d\n", __func__
, ret
);
5474 if (ret
< sizeof (size_buf
))
5478 *order
= size_buf
.order
;
5479 dout(" order %u", (unsigned int)*order
);
5481 *snap_size
= le64_to_cpu(size_buf
.size
);
5483 dout(" snap_id 0x%016llx snap_size = %llu\n",
5484 (unsigned long long)snap_id
,
5485 (unsigned long long)*snap_size
);
5490 static int rbd_dev_v2_image_size(struct rbd_device
*rbd_dev
)
5492 return _rbd_dev_v2_snap_size(rbd_dev
, CEPH_NOSNAP
,
5493 &rbd_dev
->header
.obj_order
,
5494 &rbd_dev
->header
.image_size
);
5497 static int rbd_dev_v2_object_prefix(struct rbd_device
*rbd_dev
)
5504 /* Response will be an encoded string, which includes a length */
5505 size
= sizeof(__le32
) + RBD_OBJ_PREFIX_LEN_MAX
;
5506 reply_buf
= kzalloc(size
, GFP_KERNEL
);
5510 ret
= rbd_obj_method_sync(rbd_dev
, &rbd_dev
->header_oid
,
5511 &rbd_dev
->header_oloc
, "get_object_prefix",
5512 NULL
, 0, reply_buf
, size
);
5513 dout("%s: rbd_obj_method_sync returned %d\n", __func__
, ret
);
5518 rbd_dev
->header
.object_prefix
= ceph_extract_encoded_string(&p
,
5519 p
+ ret
, NULL
, GFP_NOIO
);
5522 if (IS_ERR(rbd_dev
->header
.object_prefix
)) {
5523 ret
= PTR_ERR(rbd_dev
->header
.object_prefix
);
5524 rbd_dev
->header
.object_prefix
= NULL
;
5526 dout(" object_prefix = %s\n", rbd_dev
->header
.object_prefix
);
5534 static int _rbd_dev_v2_snap_features(struct rbd_device
*rbd_dev
, u64 snap_id
,
5535 bool read_only
, u64
*snap_features
)
5544 } __attribute__ ((packed
)) features_buf
= { 0 };
5548 features_in
.snap_id
= cpu_to_le64(snap_id
);
5549 features_in
.read_only
= read_only
;
5551 ret
= rbd_obj_method_sync(rbd_dev
, &rbd_dev
->header_oid
,
5552 &rbd_dev
->header_oloc
, "get_features",
5553 &features_in
, sizeof(features_in
),
5554 &features_buf
, sizeof(features_buf
));
5555 dout("%s: rbd_obj_method_sync returned %d\n", __func__
, ret
);
5558 if (ret
< sizeof (features_buf
))
5561 unsup
= le64_to_cpu(features_buf
.incompat
) & ~RBD_FEATURES_SUPPORTED
;
5563 rbd_warn(rbd_dev
, "image uses unsupported features: 0x%llx",
5568 *snap_features
= le64_to_cpu(features_buf
.features
);
5570 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5571 (unsigned long long)snap_id
,
5572 (unsigned long long)*snap_features
,
5573 (unsigned long long)le64_to_cpu(features_buf
.incompat
));
5578 static int rbd_dev_v2_features(struct rbd_device
*rbd_dev
)
5580 return _rbd_dev_v2_snap_features(rbd_dev
, CEPH_NOSNAP
,
5582 &rbd_dev
->header
.features
);
5586 * These are generic image flags, but since they are used only for
5587 * object map, store them in rbd_dev->object_map_flags.
5589 * For the same reason, this function is called only on object map
5590 * (re)load and not on header refresh.
5592 static int rbd_dev_v2_get_flags(struct rbd_device
*rbd_dev
)
5594 __le64 snapid
= cpu_to_le64(rbd_dev
->spec
->snap_id
);
5598 ret
= rbd_obj_method_sync(rbd_dev
, &rbd_dev
->header_oid
,
5599 &rbd_dev
->header_oloc
, "get_flags",
5600 &snapid
, sizeof(snapid
),
5601 &flags
, sizeof(flags
));
5604 if (ret
< sizeof(flags
))
5607 rbd_dev
->object_map_flags
= le64_to_cpu(flags
);
5611 struct parent_image_info
{
5613 const char *pool_ns
;
5614 const char *image_id
;
5622 * The caller is responsible for @pii.
5624 static int decode_parent_image_spec(void **p
, void *end
,
5625 struct parent_image_info
*pii
)
5631 ret
= ceph_start_decoding(p
, end
, 1, "ParentImageSpec",
5632 &struct_v
, &struct_len
);
5636 ceph_decode_64_safe(p
, end
, pii
->pool_id
, e_inval
);
5637 pii
->pool_ns
= ceph_extract_encoded_string(p
, end
, NULL
, GFP_KERNEL
);
5638 if (IS_ERR(pii
->pool_ns
)) {
5639 ret
= PTR_ERR(pii
->pool_ns
);
5640 pii
->pool_ns
= NULL
;
5643 pii
->image_id
= ceph_extract_encoded_string(p
, end
, NULL
, GFP_KERNEL
);
5644 if (IS_ERR(pii
->image_id
)) {
5645 ret
= PTR_ERR(pii
->image_id
);
5646 pii
->image_id
= NULL
;
5649 ceph_decode_64_safe(p
, end
, pii
->snap_id
, e_inval
);
5656 static int __get_parent_info(struct rbd_device
*rbd_dev
,
5657 struct page
*req_page
,
5658 struct page
*reply_page
,
5659 struct parent_image_info
*pii
)
5661 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
5662 size_t reply_len
= PAGE_SIZE
;
5666 ret
= ceph_osdc_call(osdc
, &rbd_dev
->header_oid
, &rbd_dev
->header_oloc
,
5667 "rbd", "parent_get", CEPH_OSD_FLAG_READ
,
5668 req_page
, sizeof(u64
), &reply_page
, &reply_len
);
5670 return ret
== -EOPNOTSUPP
? 1 : ret
;
5672 p
= page_address(reply_page
);
5673 end
= p
+ reply_len
;
5674 ret
= decode_parent_image_spec(&p
, end
, pii
);
5678 ret
= ceph_osdc_call(osdc
, &rbd_dev
->header_oid
, &rbd_dev
->header_oloc
,
5679 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ
,
5680 req_page
, sizeof(u64
), &reply_page
, &reply_len
);
5684 p
= page_address(reply_page
);
5685 end
= p
+ reply_len
;
5686 ceph_decode_8_safe(&p
, end
, pii
->has_overlap
, e_inval
);
5687 if (pii
->has_overlap
)
5688 ceph_decode_64_safe(&p
, end
, pii
->overlap
, e_inval
);
5697 * The caller is responsible for @pii.
5699 static int __get_parent_info_legacy(struct rbd_device
*rbd_dev
,
5700 struct page
*req_page
,
5701 struct page
*reply_page
,
5702 struct parent_image_info
*pii
)
5704 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
5705 size_t reply_len
= PAGE_SIZE
;
5709 ret
= ceph_osdc_call(osdc
, &rbd_dev
->header_oid
, &rbd_dev
->header_oloc
,
5710 "rbd", "get_parent", CEPH_OSD_FLAG_READ
,
5711 req_page
, sizeof(u64
), &reply_page
, &reply_len
);
5715 p
= page_address(reply_page
);
5716 end
= p
+ reply_len
;
5717 ceph_decode_64_safe(&p
, end
, pii
->pool_id
, e_inval
);
5718 pii
->image_id
= ceph_extract_encoded_string(&p
, end
, NULL
, GFP_KERNEL
);
5719 if (IS_ERR(pii
->image_id
)) {
5720 ret
= PTR_ERR(pii
->image_id
);
5721 pii
->image_id
= NULL
;
5724 ceph_decode_64_safe(&p
, end
, pii
->snap_id
, e_inval
);
5725 pii
->has_overlap
= true;
5726 ceph_decode_64_safe(&p
, end
, pii
->overlap
, e_inval
);
5734 static int get_parent_info(struct rbd_device
*rbd_dev
,
5735 struct parent_image_info
*pii
)
5737 struct page
*req_page
, *reply_page
;
5741 req_page
= alloc_page(GFP_KERNEL
);
5745 reply_page
= alloc_page(GFP_KERNEL
);
5747 __free_page(req_page
);
5751 p
= page_address(req_page
);
5752 ceph_encode_64(&p
, rbd_dev
->spec
->snap_id
);
5753 ret
= __get_parent_info(rbd_dev
, req_page
, reply_page
, pii
);
5755 ret
= __get_parent_info_legacy(rbd_dev
, req_page
, reply_page
,
5758 __free_page(req_page
);
5759 __free_page(reply_page
);
5763 static int rbd_dev_v2_parent_info(struct rbd_device
*rbd_dev
)
5765 struct rbd_spec
*parent_spec
;
5766 struct parent_image_info pii
= { 0 };
5769 parent_spec
= rbd_spec_alloc();
5773 ret
= get_parent_info(rbd_dev
, &pii
);
5777 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5778 __func__
, pii
.pool_id
, pii
.pool_ns
, pii
.image_id
, pii
.snap_id
,
5779 pii
.has_overlap
, pii
.overlap
);
5781 if (pii
.pool_id
== CEPH_NOPOOL
|| !pii
.has_overlap
) {
5783 * Either the parent never existed, or we have
5784 * record of it but the image got flattened so it no
5785 * longer has a parent. When the parent of a
5786 * layered image disappears we immediately set the
5787 * overlap to 0. The effect of this is that all new
5788 * requests will be treated as if the image had no
5791 * If !pii.has_overlap, the parent image spec is not
5792 * applicable. It's there to avoid duplication in each
5795 if (rbd_dev
->parent_overlap
) {
5796 rbd_dev
->parent_overlap
= 0;
5797 rbd_dev_parent_put(rbd_dev
);
5798 pr_info("%s: clone image has been flattened\n",
5799 rbd_dev
->disk
->disk_name
);
5802 goto out
; /* No parent? No problem. */
5805 /* The ceph file layout needs to fit pool id in 32 bits */
5808 if (pii
.pool_id
> (u64
)U32_MAX
) {
5809 rbd_warn(NULL
, "parent pool id too large (%llu > %u)",
5810 (unsigned long long)pii
.pool_id
, U32_MAX
);
5815 * The parent won't change (except when the clone is
5816 * flattened, already handled that). So we only need to
5817 * record the parent spec we have not already done so.
5819 if (!rbd_dev
->parent_spec
) {
5820 parent_spec
->pool_id
= pii
.pool_id
;
5821 if (pii
.pool_ns
&& *pii
.pool_ns
) {
5822 parent_spec
->pool_ns
= pii
.pool_ns
;
5825 parent_spec
->image_id
= pii
.image_id
;
5826 pii
.image_id
= NULL
;
5827 parent_spec
->snap_id
= pii
.snap_id
;
5829 rbd_dev
->parent_spec
= parent_spec
;
5830 parent_spec
= NULL
; /* rbd_dev now owns this */
5834 * We always update the parent overlap. If it's zero we issue
5835 * a warning, as we will proceed as if there was no parent.
5839 /* refresh, careful to warn just once */
5840 if (rbd_dev
->parent_overlap
)
5842 "clone now standalone (overlap became 0)");
5845 rbd_warn(rbd_dev
, "clone is standalone (overlap 0)");
5848 rbd_dev
->parent_overlap
= pii
.overlap
;
5854 kfree(pii
.image_id
);
5855 rbd_spec_put(parent_spec
);
5859 static int rbd_dev_v2_striping_info(struct rbd_device
*rbd_dev
)
5863 __le64 stripe_count
;
5864 } __attribute__ ((packed
)) striping_info_buf
= { 0 };
5865 size_t size
= sizeof (striping_info_buf
);
5869 ret
= rbd_obj_method_sync(rbd_dev
, &rbd_dev
->header_oid
,
5870 &rbd_dev
->header_oloc
, "get_stripe_unit_count",
5871 NULL
, 0, &striping_info_buf
, size
);
5872 dout("%s: rbd_obj_method_sync returned %d\n", __func__
, ret
);
5878 p
= &striping_info_buf
;
5879 rbd_dev
->header
.stripe_unit
= ceph_decode_64(&p
);
5880 rbd_dev
->header
.stripe_count
= ceph_decode_64(&p
);
5884 static int rbd_dev_v2_data_pool(struct rbd_device
*rbd_dev
)
5886 __le64 data_pool_id
;
5889 ret
= rbd_obj_method_sync(rbd_dev
, &rbd_dev
->header_oid
,
5890 &rbd_dev
->header_oloc
, "get_data_pool",
5891 NULL
, 0, &data_pool_id
, sizeof(data_pool_id
));
5894 if (ret
< sizeof(data_pool_id
))
5897 rbd_dev
->header
.data_pool_id
= le64_to_cpu(data_pool_id
);
5898 WARN_ON(rbd_dev
->header
.data_pool_id
== CEPH_NOPOOL
);
5902 static char *rbd_dev_image_name(struct rbd_device
*rbd_dev
)
5904 CEPH_DEFINE_OID_ONSTACK(oid
);
5905 size_t image_id_size
;
5910 void *reply_buf
= NULL
;
5912 char *image_name
= NULL
;
5915 rbd_assert(!rbd_dev
->spec
->image_name
);
5917 len
= strlen(rbd_dev
->spec
->image_id
);
5918 image_id_size
= sizeof (__le32
) + len
;
5919 image_id
= kmalloc(image_id_size
, GFP_KERNEL
);
5924 end
= image_id
+ image_id_size
;
5925 ceph_encode_string(&p
, end
, rbd_dev
->spec
->image_id
, (u32
)len
);
5927 size
= sizeof (__le32
) + RBD_IMAGE_NAME_LEN_MAX
;
5928 reply_buf
= kmalloc(size
, GFP_KERNEL
);
5932 ceph_oid_printf(&oid
, "%s", RBD_DIRECTORY
);
5933 ret
= rbd_obj_method_sync(rbd_dev
, &oid
, &rbd_dev
->header_oloc
,
5934 "dir_get_name", image_id
, image_id_size
,
5939 end
= reply_buf
+ ret
;
5941 image_name
= ceph_extract_encoded_string(&p
, end
, &len
, GFP_KERNEL
);
5942 if (IS_ERR(image_name
))
5945 dout("%s: name is %s len is %zd\n", __func__
, image_name
, len
);
5953 static u64
rbd_v1_snap_id_by_name(struct rbd_device
*rbd_dev
, const char *name
)
5955 struct ceph_snap_context
*snapc
= rbd_dev
->header
.snapc
;
5956 const char *snap_name
;
5959 /* Skip over names until we find the one we are looking for */
5961 snap_name
= rbd_dev
->header
.snap_names
;
5962 while (which
< snapc
->num_snaps
) {
5963 if (!strcmp(name
, snap_name
))
5964 return snapc
->snaps
[which
];
5965 snap_name
+= strlen(snap_name
) + 1;
5971 static u64
rbd_v2_snap_id_by_name(struct rbd_device
*rbd_dev
, const char *name
)
5973 struct ceph_snap_context
*snapc
= rbd_dev
->header
.snapc
;
5978 for (which
= 0; !found
&& which
< snapc
->num_snaps
; which
++) {
5979 const char *snap_name
;
5981 snap_id
= snapc
->snaps
[which
];
5982 snap_name
= rbd_dev_v2_snap_name(rbd_dev
, snap_id
);
5983 if (IS_ERR(snap_name
)) {
5984 /* ignore no-longer existing snapshots */
5985 if (PTR_ERR(snap_name
) == -ENOENT
)
5990 found
= !strcmp(name
, snap_name
);
5993 return found
? snap_id
: CEPH_NOSNAP
;
5997 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5998 * no snapshot by that name is found, or if an error occurs.
6000 static u64
rbd_snap_id_by_name(struct rbd_device
*rbd_dev
, const char *name
)
6002 if (rbd_dev
->image_format
== 1)
6003 return rbd_v1_snap_id_by_name(rbd_dev
, name
);
6005 return rbd_v2_snap_id_by_name(rbd_dev
, name
);
6009 * An image being mapped will have everything but the snap id.
6011 static int rbd_spec_fill_snap_id(struct rbd_device
*rbd_dev
)
6013 struct rbd_spec
*spec
= rbd_dev
->spec
;
6015 rbd_assert(spec
->pool_id
!= CEPH_NOPOOL
&& spec
->pool_name
);
6016 rbd_assert(spec
->image_id
&& spec
->image_name
);
6017 rbd_assert(spec
->snap_name
);
6019 if (strcmp(spec
->snap_name
, RBD_SNAP_HEAD_NAME
)) {
6022 snap_id
= rbd_snap_id_by_name(rbd_dev
, spec
->snap_name
);
6023 if (snap_id
== CEPH_NOSNAP
)
6026 spec
->snap_id
= snap_id
;
6028 spec
->snap_id
= CEPH_NOSNAP
;
6035 * A parent image will have all ids but none of the names.
6037 * All names in an rbd spec are dynamically allocated. It's OK if we
6038 * can't figure out the name for an image id.
6040 static int rbd_spec_fill_names(struct rbd_device
*rbd_dev
)
6042 struct ceph_osd_client
*osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
6043 struct rbd_spec
*spec
= rbd_dev
->spec
;
6044 const char *pool_name
;
6045 const char *image_name
;
6046 const char *snap_name
;
6049 rbd_assert(spec
->pool_id
!= CEPH_NOPOOL
);
6050 rbd_assert(spec
->image_id
);
6051 rbd_assert(spec
->snap_id
!= CEPH_NOSNAP
);
6053 /* Get the pool name; we have to make our own copy of this */
6055 pool_name
= ceph_pg_pool_name_by_id(osdc
->osdmap
, spec
->pool_id
);
6057 rbd_warn(rbd_dev
, "no pool with id %llu", spec
->pool_id
);
6060 pool_name
= kstrdup(pool_name
, GFP_KERNEL
);
6064 /* Fetch the image name; tolerate failure here */
6066 image_name
= rbd_dev_image_name(rbd_dev
);
6068 rbd_warn(rbd_dev
, "unable to get image name");
6070 /* Fetch the snapshot name */
6072 snap_name
= rbd_snap_name(rbd_dev
, spec
->snap_id
);
6073 if (IS_ERR(snap_name
)) {
6074 ret
= PTR_ERR(snap_name
);
6078 spec
->pool_name
= pool_name
;
6079 spec
->image_name
= image_name
;
6080 spec
->snap_name
= snap_name
;
6090 static int rbd_dev_v2_snap_context(struct rbd_device
*rbd_dev
)
6099 struct ceph_snap_context
*snapc
;
6103 * We'll need room for the seq value (maximum snapshot id),
6104 * snapshot count, and array of that many snapshot ids.
6105 * For now we have a fixed upper limit on the number we're
6106 * prepared to receive.
6108 size
= sizeof (__le64
) + sizeof (__le32
) +
6109 RBD_MAX_SNAP_COUNT
* sizeof (__le64
);
6110 reply_buf
= kzalloc(size
, GFP_KERNEL
);
6114 ret
= rbd_obj_method_sync(rbd_dev
, &rbd_dev
->header_oid
,
6115 &rbd_dev
->header_oloc
, "get_snapcontext",
6116 NULL
, 0, reply_buf
, size
);
6117 dout("%s: rbd_obj_method_sync returned %d\n", __func__
, ret
);
6122 end
= reply_buf
+ ret
;
6124 ceph_decode_64_safe(&p
, end
, seq
, out
);
6125 ceph_decode_32_safe(&p
, end
, snap_count
, out
);
6128 * Make sure the reported number of snapshot ids wouldn't go
6129 * beyond the end of our buffer. But before checking that,
6130 * make sure the computed size of the snapshot context we
6131 * allocate is representable in a size_t.
6133 if (snap_count
> (SIZE_MAX
- sizeof (struct ceph_snap_context
))
6138 if (!ceph_has_room(&p
, end
, snap_count
* sizeof (__le64
)))
6142 snapc
= ceph_create_snap_context(snap_count
, GFP_KERNEL
);
6148 for (i
= 0; i
< snap_count
; i
++)
6149 snapc
->snaps
[i
] = ceph_decode_64(&p
);
6151 ceph_put_snap_context(rbd_dev
->header
.snapc
);
6152 rbd_dev
->header
.snapc
= snapc
;
6154 dout(" snap context seq = %llu, snap_count = %u\n",
6155 (unsigned long long)seq
, (unsigned int)snap_count
);
6162 static const char *rbd_dev_v2_snap_name(struct rbd_device
*rbd_dev
,
6173 size
= sizeof (__le32
) + RBD_MAX_SNAP_NAME_LEN
;
6174 reply_buf
= kmalloc(size
, GFP_KERNEL
);
6176 return ERR_PTR(-ENOMEM
);
6178 snapid
= cpu_to_le64(snap_id
);
6179 ret
= rbd_obj_method_sync(rbd_dev
, &rbd_dev
->header_oid
,
6180 &rbd_dev
->header_oloc
, "get_snapshot_name",
6181 &snapid
, sizeof(snapid
), reply_buf
, size
);
6182 dout("%s: rbd_obj_method_sync returned %d\n", __func__
, ret
);
6184 snap_name
= ERR_PTR(ret
);
6189 end
= reply_buf
+ ret
;
6190 snap_name
= ceph_extract_encoded_string(&p
, end
, NULL
, GFP_KERNEL
);
6191 if (IS_ERR(snap_name
))
6194 dout(" snap_id 0x%016llx snap_name = %s\n",
6195 (unsigned long long)snap_id
, snap_name
);
6202 static int rbd_dev_v2_header_info(struct rbd_device
*rbd_dev
)
6204 bool first_time
= rbd_dev
->header
.object_prefix
== NULL
;
6207 ret
= rbd_dev_v2_image_size(rbd_dev
);
6212 ret
= rbd_dev_v2_header_onetime(rbd_dev
);
6217 ret
= rbd_dev_v2_snap_context(rbd_dev
);
6218 if (ret
&& first_time
) {
6219 kfree(rbd_dev
->header
.object_prefix
);
6220 rbd_dev
->header
.object_prefix
= NULL
;
6226 static int rbd_dev_header_info(struct rbd_device
*rbd_dev
)
6228 rbd_assert(rbd_image_format_valid(rbd_dev
->image_format
));
6230 if (rbd_dev
->image_format
== 1)
6231 return rbd_dev_v1_header_info(rbd_dev
);
6233 return rbd_dev_v2_header_info(rbd_dev
);
6237 * Skips over white space at *buf, and updates *buf to point to the
6238 * first found non-space character (if any). Returns the length of
6239 * the token (string of non-white space characters) found. Note
6240 * that *buf must be terminated with '\0'.
6242 static inline size_t next_token(const char **buf
)
6245 * These are the characters that produce nonzero for
6246 * isspace() in the "C" and "POSIX" locales.
6248 const char *spaces
= " \f\n\r\t\v";
6250 *buf
+= strspn(*buf
, spaces
); /* Find start of token */
6252 return strcspn(*buf
, spaces
); /* Return token length */
6256 * Finds the next token in *buf, dynamically allocates a buffer big
6257 * enough to hold a copy of it, and copies the token into the new
6258 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6259 * that a duplicate buffer is created even for a zero-length token.
6261 * Returns a pointer to the newly-allocated duplicate, or a null
6262 * pointer if memory for the duplicate was not available. If
6263 * the lenp argument is a non-null pointer, the length of the token
6264 * (not including the '\0') is returned in *lenp.
6266 * If successful, the *buf pointer will be updated to point beyond
6267 * the end of the found token.
6269 * Note: uses GFP_KERNEL for allocation.
6271 static inline char *dup_token(const char **buf
, size_t *lenp
)
6276 len
= next_token(buf
);
6277 dup
= kmemdup(*buf
, len
+ 1, GFP_KERNEL
);
6280 *(dup
+ len
) = '\0';
6289 static int rbd_parse_param(struct fs_parameter
*param
,
6290 struct rbd_parse_opts_ctx
*pctx
)
6292 struct rbd_options
*opt
= pctx
->opts
;
6293 struct fs_parse_result result
;
6294 struct p_log log
= {.prefix
= "rbd"};
6297 ret
= ceph_parse_param(param
, pctx
->copts
, NULL
);
6298 if (ret
!= -ENOPARAM
)
6301 token
= __fs_parse(&log
, rbd_parameters
, param
, &result
);
6302 dout("%s fs_parse '%s' token %d\n", __func__
, param
->key
, token
);
6304 if (token
== -ENOPARAM
)
6305 return inval_plog(&log
, "Unknown parameter '%s'",
6311 case Opt_queue_depth
:
6312 if (result
.uint_32
< 1)
6314 opt
->queue_depth
= result
.uint_32
;
6316 case Opt_alloc_size
:
6317 if (result
.uint_32
< SECTOR_SIZE
)
6319 if (!is_power_of_2(result
.uint_32
))
6320 return inval_plog(&log
, "alloc_size must be a power of 2");
6321 opt
->alloc_size
= result
.uint_32
;
6323 case Opt_lock_timeout
:
6324 /* 0 is "wait forever" (i.e. infinite timeout) */
6325 if (result
.uint_32
> INT_MAX
/ 1000)
6327 opt
->lock_timeout
= msecs_to_jiffies(result
.uint_32
* 1000);
6330 kfree(pctx
->spec
->pool_ns
);
6331 pctx
->spec
->pool_ns
= param
->string
;
6332 param
->string
= NULL
;
6335 opt
->read_only
= true;
6337 case Opt_read_write
:
6338 opt
->read_only
= false;
6340 case Opt_lock_on_read
:
6341 opt
->lock_on_read
= true;
6344 opt
->exclusive
= true;
6356 return inval_plog(&log
, "%s out of range", param
->key
);
6360 * This duplicates most of generic_parse_monolithic(), untying it from
6361 * fs_context and skipping standard superblock and security options.
6363 static int rbd_parse_options(char *options
, struct rbd_parse_opts_ctx
*pctx
)
6368 dout("%s '%s'\n", __func__
, options
);
6369 while ((key
= strsep(&options
, ",")) != NULL
) {
6371 struct fs_parameter param
= {
6373 .type
= fs_value_is_flag
,
6375 char *value
= strchr(key
, '=');
6382 v_len
= strlen(value
);
6383 param
.string
= kmemdup_nul(value
, v_len
,
6387 param
.type
= fs_value_is_string
;
6391 ret
= rbd_parse_param(¶m
, pctx
);
6392 kfree(param
.string
);
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
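/*
 * For example, a mapping request written to /sys/bus/rbd/add might
 * look roughly like this (monitor address, credentials, pool and
 * image names are illustrative):
 *
 *   1.2.3.4:6789 name=admin,secret=<key> mypool myimage
 */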
6442 static int rbd_add_parse_args(const char *buf
,
6443 struct ceph_options
**ceph_opts
,
6444 struct rbd_options
**opts
,
6445 struct rbd_spec
**rbd_spec
)
6449 const char *mon_addrs
;
6451 size_t mon_addrs_size
;
6452 struct rbd_parse_opts_ctx pctx
= { 0 };
6455 /* The first four tokens are required */
6457 len
= next_token(&buf
);
6459 rbd_warn(NULL
, "no monitor address(es) provided");
6463 mon_addrs_size
= len
;
6467 options
= dup_token(&buf
, NULL
);
6471 rbd_warn(NULL
, "no options provided");
6475 pctx
.spec
= rbd_spec_alloc();
6479 pctx
.spec
->pool_name
= dup_token(&buf
, NULL
);
6480 if (!pctx
.spec
->pool_name
)
6482 if (!*pctx
.spec
->pool_name
) {
6483 rbd_warn(NULL
, "no pool name provided");
6487 pctx
.spec
->image_name
= dup_token(&buf
, NULL
);
6488 if (!pctx
.spec
->image_name
)
6490 if (!*pctx
.spec
->image_name
) {
6491 rbd_warn(NULL
, "no image name provided");
6496 * Snapshot name is optional; default is to use "-"
6497 * (indicating the head/no snapshot).
6499 len
= next_token(&buf
);
6501 buf
= RBD_SNAP_HEAD_NAME
; /* No snapshot supplied */
6502 len
= sizeof (RBD_SNAP_HEAD_NAME
) - 1;
6503 } else if (len
> RBD_MAX_SNAP_NAME_LEN
) {
6504 ret
= -ENAMETOOLONG
;
6507 snap_name
= kmemdup(buf
, len
+ 1, GFP_KERNEL
);
6510 *(snap_name
+ len
) = '\0';
6511 pctx
.spec
->snap_name
= snap_name
;
6513 pctx
.copts
= ceph_alloc_options();
6517 /* Initialize all rbd options to the defaults */
6519 pctx
.opts
= kzalloc(sizeof(*pctx
.opts
), GFP_KERNEL
);
6523 pctx
.opts
->read_only
= RBD_READ_ONLY_DEFAULT
;
6524 pctx
.opts
->queue_depth
= RBD_QUEUE_DEPTH_DEFAULT
;
6525 pctx
.opts
->alloc_size
= RBD_ALLOC_SIZE_DEFAULT
;
6526 pctx
.opts
->lock_timeout
= RBD_LOCK_TIMEOUT_DEFAULT
;
6527 pctx
.opts
->lock_on_read
= RBD_LOCK_ON_READ_DEFAULT
;
6528 pctx
.opts
->exclusive
= RBD_EXCLUSIVE_DEFAULT
;
6529 pctx
.opts
->trim
= RBD_TRIM_DEFAULT
;
6531 ret
= ceph_parse_mon_ips(mon_addrs
, mon_addrs_size
, pctx
.copts
, NULL
);
6535 ret
= rbd_parse_options(options
, &pctx
);
6539 *ceph_opts
= pctx
.copts
;
6541 *rbd_spec
= pctx
.spec
;
6549 ceph_destroy_options(pctx
.copts
);
6550 rbd_spec_put(pctx
.spec
);
6555 static void rbd_dev_image_unlock(struct rbd_device
*rbd_dev
)
6557 down_write(&rbd_dev
->lock_rwsem
);
6558 if (__rbd_is_lock_owner(rbd_dev
))
6559 __rbd_release_lock(rbd_dev
);
6560 up_write(&rbd_dev
->lock_rwsem
);
6564 * If the wait is interrupted, an error is returned even if the lock
6565 * was successfully acquired. rbd_dev_image_unlock() will release it
6568 static int rbd_add_acquire_lock(struct rbd_device
*rbd_dev
)
6572 if (!(rbd_dev
->header
.features
& RBD_FEATURE_EXCLUSIVE_LOCK
)) {
6573 if (!rbd_dev
->opts
->exclusive
&& !rbd_dev
->opts
->lock_on_read
)
6576 rbd_warn(rbd_dev
, "exclusive-lock feature is not enabled");
6580 if (rbd_is_ro(rbd_dev
))
6583 rbd_assert(!rbd_is_lock_owner(rbd_dev
));
6584 queue_delayed_work(rbd_dev
->task_wq
, &rbd_dev
->lock_dwork
, 0);
6585 ret
= wait_for_completion_killable_timeout(&rbd_dev
->acquire_wait
,
6586 ceph_timeout_jiffies(rbd_dev
->opts
->lock_timeout
));
6588 ret
= rbd_dev
->acquire_err
;
6590 cancel_delayed_work_sync(&rbd_dev
->lock_dwork
);
6596 rbd_warn(rbd_dev
, "failed to acquire exclusive lock: %ld", ret
);
6601 * The lock may have been released by now, unless automatic lock
6602 * transitions are disabled.
6604 rbd_assert(!rbd_dev
->opts
->exclusive
|| rbd_is_lock_owner(rbd_dev
));
6609 * An rbd format 2 image has a unique identifier, distinct from the
6610 * name given to it by the user. Internally, that identifier is
6611 * what's used to specify the names of objects related to the image.
6613 * A special "rbd id" object is used to map an rbd image name to its
6614 * id. If that object doesn't exist, then there is no v2 rbd image
6615 * with the supplied name.
6617 * This function will record the given rbd_dev's image_id field if
6618 * it can be determined, and in that case will return 0. If any
6619 * errors occur a negative errno will be returned and the rbd_dev's
6620 * image_id field will be unchanged (and should be NULL).
6622 static int rbd_dev_image_id(struct rbd_device
*rbd_dev
)
6626 CEPH_DEFINE_OID_ONSTACK(oid
);
6631 * When probing a parent image, the image id is already
6632 * known (and the image name likely is not). There's no
6633 * need to fetch the image id again in this case. We
6634 * do still need to set the image format though.
6636 if (rbd_dev
->spec
->image_id
) {
6637 rbd_dev
->image_format
= *rbd_dev
->spec
->image_id
? 2 : 1;
6643 * First, see if the format 2 image id file exists, and if
6644 * so, get the image's persistent id from it.
6646 ret
= ceph_oid_aprintf(&oid
, GFP_KERNEL
, "%s%s", RBD_ID_PREFIX
,
6647 rbd_dev
->spec
->image_name
);
6651 dout("rbd id object name is %s\n", oid
.name
);
6653 /* Response will be an encoded string, which includes a length */
6654 size
= sizeof (__le32
) + RBD_IMAGE_ID_LEN_MAX
;
6655 response
= kzalloc(size
, GFP_NOIO
);
6661 /* If it doesn't exist we'll assume it's a format 1 image */
6663 ret
= rbd_obj_method_sync(rbd_dev
, &oid
, &rbd_dev
->header_oloc
,
6666 dout("%s: rbd_obj_method_sync returned %d\n", __func__
, ret
);
6667 if (ret
== -ENOENT
) {
6668 image_id
= kstrdup("", GFP_KERNEL
);
6669 ret
= image_id
? 0 : -ENOMEM
;
6671 rbd_dev
->image_format
= 1;
6672 } else if (ret
>= 0) {
6675 image_id
= ceph_extract_encoded_string(&p
, p
+ ret
,
6677 ret
= PTR_ERR_OR_ZERO(image_id
);
6679 rbd_dev
->image_format
= 2;
6683 rbd_dev
->spec
->image_id
= image_id
;
6684 dout("image_id is %s\n", image_id
);
6688 ceph_oid_destroy(&oid
);
6693 * Undo whatever state changes are made by v1 or v2 header info
6696 static void rbd_dev_unprobe(struct rbd_device
*rbd_dev
)
6698 struct rbd_image_header
*header
;
6700 rbd_dev_parent_put(rbd_dev
);
6701 rbd_object_map_free(rbd_dev
);
6702 rbd_dev_mapping_clear(rbd_dev
);
6704 /* Free dynamic fields from the header, then zero it out */
6706 header
= &rbd_dev
->header
;
6707 ceph_put_snap_context(header
->snapc
);
6708 kfree(header
->snap_sizes
);
6709 kfree(header
->snap_names
);
6710 kfree(header
->object_prefix
);
6711 memset(header
, 0, sizeof (*header
));
6714 static int rbd_dev_v2_header_onetime(struct rbd_device
*rbd_dev
)
6718 ret
= rbd_dev_v2_object_prefix(rbd_dev
);
6723 * Get the and check features for the image. Currently the
6724 * features are assumed to never change.
6726 ret
= rbd_dev_v2_features(rbd_dev
);
6730 /* If the image supports fancy striping, get its parameters */
6732 if (rbd_dev
->header
.features
& RBD_FEATURE_STRIPINGV2
) {
6733 ret
= rbd_dev_v2_striping_info(rbd_dev
);
6738 if (rbd_dev
->header
.features
& RBD_FEATURE_DATA_POOL
) {
6739 ret
= rbd_dev_v2_data_pool(rbd_dev
);
6744 rbd_init_layout(rbd_dev
);
6748 rbd_dev
->header
.features
= 0;
6749 kfree(rbd_dev
->header
.object_prefix
);
6750 rbd_dev
->header
.object_prefix
= NULL
;
6755 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6756 * rbd_dev_image_probe() recursion depth, which means it's also the
6757 * length of the already discovered part of the parent chain.
6759 static int rbd_dev_probe_parent(struct rbd_device
*rbd_dev
, int depth
)
6761 struct rbd_device
*parent
= NULL
;
6764 if (!rbd_dev
->parent_spec
)
6767 if (++depth
> RBD_MAX_PARENT_CHAIN_LEN
) {
6768 pr_info("parent chain is too long (%d)\n", depth
);
6773 parent
= __rbd_dev_create(rbd_dev
->rbd_client
, rbd_dev
->parent_spec
);
6780 * Images related by parent/child relationships always share
6781 * rbd_client and spec/parent_spec, so bump their refcounts.
6783 __rbd_get_client(rbd_dev
->rbd_client
);
6784 rbd_spec_get(rbd_dev
->parent_spec
);
6786 __set_bit(RBD_DEV_FLAG_READONLY
, &parent
->flags
);
6788 ret
= rbd_dev_image_probe(parent
, depth
);
6792 rbd_dev
->parent
= parent
;
6793 atomic_set(&rbd_dev
->parent_ref
, 1);
6797 rbd_dev_unparent(rbd_dev
);
6798 rbd_dev_destroy(parent
);
6802 static void rbd_dev_device_release(struct rbd_device
*rbd_dev
)
6804 clear_bit(RBD_DEV_FLAG_EXISTS
, &rbd_dev
->flags
);
6805 rbd_free_disk(rbd_dev
);
6807 unregister_blkdev(rbd_dev
->major
, rbd_dev
->name
);
6811 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6814 static int rbd_dev_device_setup(struct rbd_device
*rbd_dev
)
6818 /* Record our major and minor device numbers. */
6820 if (!single_major
) {
6821 ret
= register_blkdev(0, rbd_dev
->name
);
6823 goto err_out_unlock
;
6825 rbd_dev
->major
= ret
;
6828 rbd_dev
->major
= rbd_major
;
6829 rbd_dev
->minor
= rbd_dev_id_to_minor(rbd_dev
->dev_id
);
6832 /* Set up the blkdev mapping. */
6834 ret
= rbd_init_disk(rbd_dev
);
6836 goto err_out_blkdev
;
6838 set_capacity(rbd_dev
->disk
, rbd_dev
->mapping
.size
/ SECTOR_SIZE
);
6839 set_disk_ro(rbd_dev
->disk
, rbd_is_ro(rbd_dev
));
6841 ret
= dev_set_name(&rbd_dev
->dev
, "%d", rbd_dev
->dev_id
);
6845 set_bit(RBD_DEV_FLAG_EXISTS
, &rbd_dev
->flags
);
6846 up_write(&rbd_dev
->header_rwsem
);
6850 rbd_free_disk(rbd_dev
);
6853 unregister_blkdev(rbd_dev
->major
, rbd_dev
->name
);
6855 up_write(&rbd_dev
->header_rwsem
);
6859 static int rbd_dev_header_name(struct rbd_device
*rbd_dev
)
6861 struct rbd_spec
*spec
= rbd_dev
->spec
;
6864 /* Record the header object name for this rbd image. */
6866 rbd_assert(rbd_image_format_valid(rbd_dev
->image_format
));
6867 if (rbd_dev
->image_format
== 1)
6868 ret
= ceph_oid_aprintf(&rbd_dev
->header_oid
, GFP_KERNEL
, "%s%s",
6869 spec
->image_name
, RBD_SUFFIX
);
6871 ret
= ceph_oid_aprintf(&rbd_dev
->header_oid
, GFP_KERNEL
, "%s%s",
6872 RBD_HEADER_PREFIX
, spec
->image_id
);
6877 static void rbd_print_dne(struct rbd_device
*rbd_dev
, bool is_snap
)
6880 pr_info("image %s/%s%s%s does not exist\n",
6881 rbd_dev
->spec
->pool_name
,
6882 rbd_dev
->spec
->pool_ns
?: "",
6883 rbd_dev
->spec
->pool_ns
? "/" : "",
6884 rbd_dev
->spec
->image_name
);
6886 pr_info("snap %s/%s%s%s@%s does not exist\n",
6887 rbd_dev
->spec
->pool_name
,
6888 rbd_dev
->spec
->pool_ns
?: "",
6889 rbd_dev
->spec
->pool_ns
? "/" : "",
6890 rbd_dev
->spec
->image_name
,
6891 rbd_dev
->spec
->snap_name
);
6895 static void rbd_dev_image_release(struct rbd_device
*rbd_dev
)
6897 rbd_dev_unprobe(rbd_dev
);
6899 rbd_unregister_watch(rbd_dev
);
6900 rbd_dev
->image_format
= 0;
6901 kfree(rbd_dev
->spec
->image_id
);
6902 rbd_dev
->spec
->image_id
= NULL
;
6906 * Probe for the existence of the header object for the given rbd
6907 * device. If this image is the one being mapped (i.e., not a
6908 * parent), initiate a watch on its header object before using that
6909 * object to get detailed information about the rbd image.
6911 static int rbd_dev_image_probe(struct rbd_device
*rbd_dev
, int depth
)
6913 bool need_watch
= !rbd_is_ro(rbd_dev
);
6917 * Get the id from the image id object. Unless there's an
6918 * error, rbd_dev->spec->image_id will be filled in with
6919 * a dynamically-allocated string, and rbd_dev->image_format
6920 * will be set to either 1 or 2.
6922 ret
= rbd_dev_image_id(rbd_dev
);
6926 ret
= rbd_dev_header_name(rbd_dev
);
6928 goto err_out_format
;
6931 ret
= rbd_register_watch(rbd_dev
);
6934 rbd_print_dne(rbd_dev
, false);
6935 goto err_out_format
;
6939 ret
= rbd_dev_header_info(rbd_dev
);
6941 if (ret
== -ENOENT
&& !need_watch
)
6942 rbd_print_dne(rbd_dev
, false);
6947 * If this image is the one being mapped, we have pool name and
6948 * id, image name and id, and snap name - need to fill snap id.
6949 * Otherwise this is a parent image, identified by pool, image
6950 * and snap ids - need to fill in names for those ids.
6953 ret
= rbd_spec_fill_snap_id(rbd_dev
);
6955 ret
= rbd_spec_fill_names(rbd_dev
);
6958 rbd_print_dne(rbd_dev
, true);
6962 ret
= rbd_dev_mapping_set(rbd_dev
);
6966 if (rbd_is_snap(rbd_dev
) &&
6967 (rbd_dev
->header
.features
& RBD_FEATURE_OBJECT_MAP
)) {
6968 ret
= rbd_object_map_load(rbd_dev
);
6973 if (rbd_dev
->header
.features
& RBD_FEATURE_LAYERING
) {
6974 ret
= rbd_dev_v2_parent_info(rbd_dev
);
6979 ret
= rbd_dev_probe_parent(rbd_dev
, depth
);
6983 dout("discovered format %u image, header name is %s\n",
6984 rbd_dev
->image_format
, rbd_dev
->header_oid
.name
);
6988 rbd_dev_unprobe(rbd_dev
);
6991 rbd_unregister_watch(rbd_dev
);
6993 rbd_dev
->image_format
= 0;
6994 kfree(rbd_dev
->spec
->image_id
);
6995 rbd_dev
->spec
->image_id
= NULL
;
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	/* if we are mapping a snapshot it will be a read-only mapping */
	if (rbd_dev->opts->read_only ||
	    strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
		__set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0) {
		up_write(&rbd_dev->header_rwsem);
		goto err_out_rbd_dev;
	}

	if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
		rbd_warn(rbd_dev, "alloc_size adjusted to %u",
			 rbd_dev->layout.object_size);
		rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
	}

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	rc = rbd_add_acquire_lock(rbd_dev);
	if (rc)
		goto err_out_image_lock;

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
	/* see rbd_init_disk() */
	blk_put_queue(rbd_dev->disk->queue);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

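/*
 * Illustrative usage (the exact option string depends on the cluster
 * configuration; see Documentation/ABI/testing/sysfs-bus-rbd):
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage -" \
 *         > /sys/bus/rbd/add
 *
 * The trailing "-" maps the image head rather than a named snapshot.
 */
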
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count)
{
	return do_rbd_add(bus, buf, count);
}

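/*
 * Tear down the chain of parent devices built up by rbd_dev_probe_parent().
 * Each pass removes the most distant ancestor (the one with no parent of
 * its own), so rbd_dev_image_release() is only ever called on a device
 * that no longer has a parent attached.
 */
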
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

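/*
 * Handler for writes to the bus "remove"/"remove_single_major" attributes.
 * The buffer contains the device id (the N in /dev/rbdN), optionally
 * followed by "force".  The device is looked up and marked REMOVING under
 * rbd_dev_list_lock; a forced removal first prevents new IO and lets
 * in-flight IO complete or fail before the disk and image are torn down.
 */
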
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool force = false;
	int ret;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
					  &rbd_dev->flags))
			ret = -EINPROGRESS;
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}

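/*
 * Illustrative usage, assuming the image was mapped as /dev/rbd0:
 *
 *   $ echo 0 > /sys/bus/rbd/remove
 *   $ echo "0 force" > /sys/bus/rbd/remove    (ignore open_count)
 */
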
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

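/*
 * bus_register(&rbd_bus_type) is what creates /sys/bus/rbd together with
 * the add/remove attributes handled above; rbd_sysfs_cleanup() undoes the
 * two registrations in reverse order.
 */
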
static int __init rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

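/*
 * The request caches live for the lifetime of the module.
 * kmem_cache_destroy() requires every object to have been freed, which is
 * expected to hold here because all devices (and hence all outstanding
 * image/object requests) are gone by the time rbd_slab_exit() runs.
 */
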
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

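/*
 * Module unload: undo rbd_init() in reverse -- sysfs entries, the
 * single-major block device registration, the workqueue and the slab
 * caches -- and release the memory held by the dev-id allocator.
 */
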
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");