#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bio.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
	if (unlikely(down_read_trylock(sem))) {

static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
	WARN_ON(!rwsem_is_locked(&osdc->lock));

static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));

static inline void verify_osd_locked(struct ceph_osd *osd)
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));

static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
	WARN_ON(!mutex_is_locked(&lreq->lock));

static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
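/*
 * Illustration: with the default layout of 4M objects and no striping,
 * off=6M and *plen=4M map to objnum=1, objoff=2M and objlen=2M -- the
 * extent is clipped at the object boundary and *plen is shortened to
 * match.
 */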
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
	u64 orig_len = *plen;

	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (*objlen < orig_len) {
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;

static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_length = bio_length;
#endif /* CONFIG_BLOCK */
#define osd_req_op_data(oreq, whch, typ, fld)				\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
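/*
 * Note: osd_req_op_data() evaluates to a pointer to the @fld member of
 * the @typ union arm of op @whch, e.g. &req->r_ops[0].extent.osd_data.
 */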
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
	BUG_ON(which >= osd_req->r_num_ops);
	return &osd_req->r_ops[which].raw_data_in;

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
	return osd_req_op_data(osd_req, which, extent, osd_data);
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	ceph_osd_data_init(osd_data);
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
				    unsigned int which)
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);

	t->osd = CEPH_HOMELESS_OSD;

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->osd = src->osd;

static void target_destroy(struct ceph_osd_request_target *t)
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
static void request_release_checks(struct ceph_osd_request *req)
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));

static void ceph_osdc_release_request(struct kref *kref)
	struct ceph_osd_request *req = container_of(kref,
					struct ceph_osd_request, r_kref);

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

		ceph_msg_put(req->r_request);
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);

void ceph_osdc_get_request(struct ceph_osd_request *req)
	dout("%s %p (was %d)\n", __func__, req,
	     atomic_read(&req->r_kref.refcount));
	kref_get(&req->r_kref);
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
		dout("%s %p (was %d)\n", __func__, req,
		     atomic_read(&req->r_kref.refcount));
		kref_put(&req->r_kref, ceph_osdc_release_request);
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
	request_release_checks(req);

	WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
	WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
	target_destroy(&req->r_t);

	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					struct ceph_snap_context *snapc,
					unsigned int num_ops,
					bool use_mempool, gfp_t gfp_flags)
	struct ceph_osd_request *req;

		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);

	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
EXPORT_SYMBOL(ceph_osdc_alloc_request);
static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
	msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 1 + 8 + 4 + 4; /* pgid */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4; /* retry_attempt */

		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
static bool osd_req_opcode_valid(u16 opcode)
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
EXPORT_SYMBOL(osd_req_op_init);
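/*
 * For example, osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0) sets up a
 * bare stat op with no payload in op slot 0.
 */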
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	op->indata_len -= previous - length;
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;

	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			 u16 opcode, const char *class, const char *method)
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
EXPORT_SYMBOL(osd_req_op_cls_init);
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
EXPORT_SYMBOL(osd_req_op_xattr_init);
/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
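/*
 * In this file the opcode is one of CEPH_OSD_WATCH_OP_WATCH,
 * CEPH_OSD_WATCH_OP_RECONNECT, CEPH_OSD_WATCH_OP_PING or
 * CEPH_OSD_WATCH_OP_UNWATCH, depending on whether the watch is being
 * established, re-established after a session reset, kept alive or
 * torn down.
 */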
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
			ceph_msg_data_add_pages(msg, osd_data->pages,
						length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
			ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

	case CEPH_OSD_OP_STAT:
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
	case CEPH_OSD_OP_STARTSYNC:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
			cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
			cpu_to_le64(src->alloc_hint.expected_write_size);
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		pr_err("unsupported osd opcode %s\n",
		       ceph_osd_op_name(src->op));

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
/*
 * build new request AND message, calculate layout, and adjust file
 * extent.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					struct ceph_file_layout *layout,
					struct ceph_vino vino,
					unsigned int which, int num_ops,
					int opcode, int flags,
					struct ceph_snap_context *snapc,
	struct ceph_osd_request *req;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);

	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);

	ceph_osdc_put_request(req);
EXPORT_SYMBOL(ceph_osdc_new_request);
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
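/*
 * Note: DEFINE_RB_FUNCS() (from linux/ceph/libceph.h) generates the
 * insert_request()/lookup_request()/erase_request() helpers used below,
 * keyed by r_tid and linked through r_node, plus the _mc variants for
 * the map-check tree linked through r_mc_node.
 */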
986 static bool osd_homeless(struct ceph_osd
*osd
)
988 return osd
->o_osd
== CEPH_HOMELESS_OSD
;
991 static bool osd_registered(struct ceph_osd
*osd
)
993 verify_osdc_locked(osd
->o_osdc
);
995 return !RB_EMPTY_NODE(&osd
->o_node
);
999 * Assumes @osd is zero-initialized.
1001 static void osd_init(struct ceph_osd
*osd
)
1003 atomic_set(&osd
->o_ref
, 1);
1004 RB_CLEAR_NODE(&osd
->o_node
);
1005 osd
->o_requests
= RB_ROOT
;
1006 osd
->o_linger_requests
= RB_ROOT
;
1007 INIT_LIST_HEAD(&osd
->o_osd_lru
);
1008 INIT_LIST_HEAD(&osd
->o_keepalive_item
);
1009 osd
->o_incarnation
= 1;
1010 mutex_init(&osd
->lock
);
1013 static void osd_cleanup(struct ceph_osd
*osd
)
1015 WARN_ON(!RB_EMPTY_NODE(&osd
->o_node
));
1016 WARN_ON(!RB_EMPTY_ROOT(&osd
->o_requests
));
1017 WARN_ON(!RB_EMPTY_ROOT(&osd
->o_linger_requests
));
1018 WARN_ON(!list_empty(&osd
->o_osd_lru
));
1019 WARN_ON(!list_empty(&osd
->o_keepalive_item
));
1021 if (osd
->o_auth
.authorizer
) {
1022 WARN_ON(osd_homeless(osd
));
1023 ceph_auth_destroy_authorizer(osd
->o_auth
.authorizer
);
1028 * Track open sessions with osds.
1030 static struct ceph_osd
*create_osd(struct ceph_osd_client
*osdc
, int onum
)
1032 struct ceph_osd
*osd
;
1034 WARN_ON(onum
== CEPH_HOMELESS_OSD
);
1036 osd
= kzalloc(sizeof(*osd
), GFP_NOIO
| __GFP_NOFAIL
);
1041 ceph_con_init(&osd
->o_con
, osd
, &osd_con_ops
, &osdc
->client
->msgr
);
1046 static struct ceph_osd
*get_osd(struct ceph_osd
*osd
)
1048 if (atomic_inc_not_zero(&osd
->o_ref
)) {
1049 dout("get_osd %p %d -> %d\n", osd
, atomic_read(&osd
->o_ref
)-1,
1050 atomic_read(&osd
->o_ref
));
1053 dout("get_osd %p FAIL\n", osd
);
1058 static void put_osd(struct ceph_osd
*osd
)
1060 dout("put_osd %p %d -> %d\n", osd
, atomic_read(&osd
->o_ref
),
1061 atomic_read(&osd
->o_ref
) - 1);
1062 if (atomic_dec_and_test(&osd
->o_ref
)) {
1068 DEFINE_RB_FUNCS(osd
, struct ceph_osd
, o_osd
, o_node
)
1070 static void __move_osd_to_lru(struct ceph_osd
*osd
)
1072 struct ceph_osd_client
*osdc
= osd
->o_osdc
;
1074 dout("%s osd %p osd%d\n", __func__
, osd
, osd
->o_osd
);
1075 BUG_ON(!list_empty(&osd
->o_osd_lru
));
1077 spin_lock(&osdc
->osd_lru_lock
);
1078 list_add_tail(&osd
->o_osd_lru
, &osdc
->osd_lru
);
1079 spin_unlock(&osdc
->osd_lru_lock
);
1081 osd
->lru_ttl
= jiffies
+ osdc
->client
->options
->osd_idle_ttl
;
1084 static void maybe_move_osd_to_lru(struct ceph_osd
*osd
)
1086 if (RB_EMPTY_ROOT(&osd
->o_requests
) &&
1087 RB_EMPTY_ROOT(&osd
->o_linger_requests
))
1088 __move_osd_to_lru(osd
);
1091 static void __remove_osd_from_lru(struct ceph_osd
*osd
)
1093 struct ceph_osd_client
*osdc
= osd
->o_osdc
;
1095 dout("%s osd %p osd%d\n", __func__
, osd
, osd
->o_osd
);
1097 spin_lock(&osdc
->osd_lru_lock
);
1098 if (!list_empty(&osd
->o_osd_lru
))
1099 list_del_init(&osd
->o_osd_lru
);
1100 spin_unlock(&osdc
->osd_lru_lock
);
1104 * Close the connection and assign any leftover requests to the
1107 static void close_osd(struct ceph_osd
*osd
)
1109 struct ceph_osd_client
*osdc
= osd
->o_osdc
;
1112 verify_osdc_wrlocked(osdc
);
1113 dout("%s osd %p osd%d\n", __func__
, osd
, osd
->o_osd
);
1115 ceph_con_close(&osd
->o_con
);
1117 for (n
= rb_first(&osd
->o_requests
); n
; ) {
1118 struct ceph_osd_request
*req
=
1119 rb_entry(n
, struct ceph_osd_request
, r_node
);
1121 n
= rb_next(n
); /* unlink_request() */
1123 dout(" reassigning req %p tid %llu\n", req
, req
->r_tid
);
1124 unlink_request(osd
, req
);
1125 link_request(&osdc
->homeless_osd
, req
);
1127 for (n
= rb_first(&osd
->o_linger_requests
); n
; ) {
1128 struct ceph_osd_linger_request
*lreq
=
1129 rb_entry(n
, struct ceph_osd_linger_request
, node
);
1131 n
= rb_next(n
); /* unlink_linger() */
1133 dout(" reassigning lreq %p linger_id %llu\n", lreq
,
1135 unlink_linger(osd
, lreq
);
1136 link_linger(&osdc
->homeless_osd
, lreq
);
1139 __remove_osd_from_lru(osd
);
1140 erase_osd(&osdc
->osds
, osd
);
1147 static int reopen_osd(struct ceph_osd
*osd
)
1149 struct ceph_entity_addr
*peer_addr
;
1151 dout("%s osd %p osd%d\n", __func__
, osd
, osd
->o_osd
);
1153 if (RB_EMPTY_ROOT(&osd
->o_requests
) &&
1154 RB_EMPTY_ROOT(&osd
->o_linger_requests
)) {
1159 peer_addr
= &osd
->o_osdc
->osdmap
->osd_addr
[osd
->o_osd
];
1160 if (!memcmp(peer_addr
, &osd
->o_con
.peer_addr
, sizeof (*peer_addr
)) &&
1161 !ceph_con_opened(&osd
->o_con
)) {
1164 dout("osd addr hasn't changed and connection never opened, "
1165 "letting msgr retry\n");
1166 /* touch each r_stamp for handle_timeout()'s benfit */
1167 for (n
= rb_first(&osd
->o_requests
); n
; n
= rb_next(n
)) {
1168 struct ceph_osd_request
*req
=
1169 rb_entry(n
, struct ceph_osd_request
, r_node
);
1170 req
->r_stamp
= jiffies
;
1176 ceph_con_close(&osd
->o_con
);
1177 ceph_con_open(&osd
->o_con
, CEPH_ENTITY_TYPE_OSD
, osd
->o_osd
, peer_addr
);
1178 osd
->o_incarnation
++;
1183 static struct ceph_osd
*lookup_create_osd(struct ceph_osd_client
*osdc
, int o
,
1186 struct ceph_osd
*osd
;
1189 verify_osdc_wrlocked(osdc
);
1191 verify_osdc_locked(osdc
);
1193 if (o
!= CEPH_HOMELESS_OSD
)
1194 osd
= lookup_osd(&osdc
->osds
, o
);
1196 osd
= &osdc
->homeless_osd
;
1199 return ERR_PTR(-EAGAIN
);
1201 osd
= create_osd(osdc
, o
);
1202 insert_osd(&osdc
->osds
, osd
);
1203 ceph_con_open(&osd
->o_con
, CEPH_ENTITY_TYPE_OSD
, osd
->o_osd
,
1204 &osdc
->osdmap
->osd_addr
[osd
->o_osd
]);
1207 dout("%s osdc %p osd%d -> osd %p\n", __func__
, osdc
, o
, osd
);
1212 * Create request <-> OSD session relation.
1214 * @req has to be assigned a tid, @osd may be homeless.
1216 static void link_request(struct ceph_osd
*osd
, struct ceph_osd_request
*req
)
1218 verify_osd_locked(osd
);
1219 WARN_ON(!req
->r_tid
|| req
->r_osd
);
1220 dout("%s osd %p osd%d req %p tid %llu\n", __func__
, osd
, osd
->o_osd
,
1223 if (!osd_homeless(osd
))
1224 __remove_osd_from_lru(osd
);
1226 atomic_inc(&osd
->o_osdc
->num_homeless
);
1229 insert_request(&osd
->o_requests
, req
);
1233 static void unlink_request(struct ceph_osd
*osd
, struct ceph_osd_request
*req
)
1235 verify_osd_locked(osd
);
1236 WARN_ON(req
->r_osd
!= osd
);
1237 dout("%s osd %p osd%d req %p tid %llu\n", __func__
, osd
, osd
->o_osd
,
1241 erase_request(&osd
->o_requests
, req
);
1244 if (!osd_homeless(osd
))
1245 maybe_move_osd_to_lru(osd
);
1247 atomic_dec(&osd
->o_osdc
->num_homeless
);
1250 static bool __pool_full(struct ceph_pg_pool_info
*pi
)
1252 return pi
->flags
& CEPH_POOL_FLAG_FULL
;
1255 static bool have_pool_full(struct ceph_osd_client
*osdc
)
1259 for (n
= rb_first(&osdc
->osdmap
->pg_pools
); n
; n
= rb_next(n
)) {
1260 struct ceph_pg_pool_info
*pi
=
1261 rb_entry(n
, struct ceph_pg_pool_info
, node
);
1263 if (__pool_full(pi
))
1270 static bool pool_full(struct ceph_osd_client
*osdc
, s64 pool_id
)
1272 struct ceph_pg_pool_info
*pi
;
1274 pi
= ceph_pg_pool_by_id(osdc
->osdmap
, pool_id
);
1278 return __pool_full(pi
);
1282 * Returns whether a request should be blocked from being sent
1283 * based on the current osdmap and osd_client settings.
1285 static bool target_should_be_paused(struct ceph_osd_client
*osdc
,
1286 const struct ceph_osd_request_target
*t
,
1287 struct ceph_pg_pool_info
*pi
)
1289 bool pauserd
= ceph_osdmap_flag(osdc
, CEPH_OSDMAP_PAUSERD
);
1290 bool pausewr
= ceph_osdmap_flag(osdc
, CEPH_OSDMAP_PAUSEWR
) ||
1291 ceph_osdmap_flag(osdc
, CEPH_OSDMAP_FULL
) ||
1294 WARN_ON(pi
->id
!= t
->base_oloc
.pool
);
1295 return (t
->flags
& CEPH_OSD_FLAG_READ
&& pauserd
) ||
1296 (t
->flags
& CEPH_OSD_FLAG_WRITE
&& pausewr
);
enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
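/*
 * Informally: NO_ACTION means the computed target (PG, acting set, osd)
 * is unchanged, NEED_RESEND means the mapping changed and the request
 * has to be re-sent, and POOL_DNE means the base pool does not exist in
 * the current osdmap.
 */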
1305 static enum calc_target_result
calc_target(struct ceph_osd_client
*osdc
,
1306 struct ceph_osd_request_target
*t
,
1307 u32
*last_force_resend
,
1310 struct ceph_pg_pool_info
*pi
;
1311 struct ceph_pg pgid
, last_pgid
;
1312 struct ceph_osds up
, acting
;
1313 bool force_resend
= false;
1314 bool need_check_tiering
= false;
1315 bool need_resend
= false;
1316 bool sort_bitwise
= ceph_osdmap_flag(osdc
, CEPH_OSDMAP_SORTBITWISE
);
1317 enum calc_target_result ct_res
;
1320 pi
= ceph_pg_pool_by_id(osdc
->osdmap
, t
->base_oloc
.pool
);
1322 t
->osd
= CEPH_HOMELESS_OSD
;
1323 ct_res
= CALC_TARGET_POOL_DNE
;
1327 if (osdc
->osdmap
->epoch
== pi
->last_force_request_resend
) {
1328 if (last_force_resend
&&
1329 *last_force_resend
< pi
->last_force_request_resend
) {
1330 *last_force_resend
= pi
->last_force_request_resend
;
1331 force_resend
= true;
1332 } else if (!last_force_resend
) {
1333 force_resend
= true;
1336 if (ceph_oid_empty(&t
->target_oid
) || force_resend
) {
1337 ceph_oid_copy(&t
->target_oid
, &t
->base_oid
);
1338 need_check_tiering
= true;
1340 if (ceph_oloc_empty(&t
->target_oloc
) || force_resend
) {
1341 ceph_oloc_copy(&t
->target_oloc
, &t
->base_oloc
);
1342 need_check_tiering
= true;
1345 if (need_check_tiering
&&
1346 (t
->flags
& CEPH_OSD_FLAG_IGNORE_OVERLAY
) == 0) {
1347 if (t
->flags
& CEPH_OSD_FLAG_READ
&& pi
->read_tier
>= 0)
1348 t
->target_oloc
.pool
= pi
->read_tier
;
1349 if (t
->flags
& CEPH_OSD_FLAG_WRITE
&& pi
->write_tier
>= 0)
1350 t
->target_oloc
.pool
= pi
->write_tier
;
1353 ret
= ceph_object_locator_to_pg(osdc
->osdmap
, &t
->target_oid
,
1354 &t
->target_oloc
, &pgid
);
1356 WARN_ON(ret
!= -ENOENT
);
1357 t
->osd
= CEPH_HOMELESS_OSD
;
1358 ct_res
= CALC_TARGET_POOL_DNE
;
1361 last_pgid
.pool
= pgid
.pool
;
1362 last_pgid
.seed
= ceph_stable_mod(pgid
.seed
, t
->pg_num
, t
->pg_num_mask
);
1364 ceph_pg_to_up_acting_osds(osdc
->osdmap
, &pgid
, &up
, &acting
);
1366 ceph_is_new_interval(&t
->acting
,
1379 force_resend
= true;
1381 if (t
->paused
&& !target_should_be_paused(osdc
, t
, pi
)) {
1386 if (ceph_pg_compare(&t
->pgid
, &pgid
) ||
1387 ceph_osds_changed(&t
->acting
, &acting
, any_change
) ||
1389 t
->pgid
= pgid
; /* struct */
1390 ceph_osds_copy(&t
->acting
, &acting
);
1391 ceph_osds_copy(&t
->up
, &up
);
1393 t
->min_size
= pi
->min_size
;
1394 t
->pg_num
= pi
->pg_num
;
1395 t
->pg_num_mask
= pi
->pg_num_mask
;
1396 t
->sort_bitwise
= sort_bitwise
;
1398 t
->osd
= acting
.primary
;
1402 ct_res
= need_resend
? CALC_TARGET_NEED_RESEND
: CALC_TARGET_NO_ACTION
;
1404 dout("%s t %p -> ct_res %d osd %d\n", __func__
, t
, ct_res
, t
->osd
);
1408 static void setup_request_data(struct ceph_osd_request
*req
,
1409 struct ceph_msg
*msg
)
1414 if (!list_empty(&msg
->data
))
1417 WARN_ON(msg
->data_length
);
1418 for (i
= 0; i
< req
->r_num_ops
; i
++) {
1419 struct ceph_osd_req_op
*op
= &req
->r_ops
[i
];
1423 case CEPH_OSD_OP_WRITE
:
1424 case CEPH_OSD_OP_WRITEFULL
:
1425 WARN_ON(op
->indata_len
!= op
->extent
.length
);
1426 ceph_osdc_msg_data_add(msg
, &op
->extent
.osd_data
);
1428 case CEPH_OSD_OP_SETXATTR
:
1429 case CEPH_OSD_OP_CMPXATTR
:
1430 WARN_ON(op
->indata_len
!= op
->xattr
.name_len
+
1431 op
->xattr
.value_len
);
1432 ceph_osdc_msg_data_add(msg
, &op
->xattr
.osd_data
);
1434 case CEPH_OSD_OP_NOTIFY_ACK
:
1435 ceph_osdc_msg_data_add(msg
,
1436 &op
->notify_ack
.request_data
);
1440 case CEPH_OSD_OP_STAT
:
1441 ceph_osdc_msg_data_add(req
->r_reply
,
1444 case CEPH_OSD_OP_READ
:
1445 ceph_osdc_msg_data_add(req
->r_reply
,
1446 &op
->extent
.osd_data
);
1450 case CEPH_OSD_OP_CALL
:
1451 WARN_ON(op
->indata_len
!= op
->cls
.class_len
+
1452 op
->cls
.method_len
+
1453 op
->cls
.indata_len
);
1454 ceph_osdc_msg_data_add(msg
, &op
->cls
.request_info
);
1455 /* optional, can be NONE */
1456 ceph_osdc_msg_data_add(msg
, &op
->cls
.request_data
);
1457 /* optional, can be NONE */
1458 ceph_osdc_msg_data_add(req
->r_reply
,
1459 &op
->cls
.response_data
);
1461 case CEPH_OSD_OP_NOTIFY
:
1462 ceph_osdc_msg_data_add(msg
,
1463 &op
->notify
.request_data
);
1464 ceph_osdc_msg_data_add(req
->r_reply
,
1465 &op
->notify
.response_data
);
1469 data_len
+= op
->indata_len
;
1472 WARN_ON(data_len
!= msg
->data_length
);
1475 static void encode_request(struct ceph_osd_request
*req
, struct ceph_msg
*msg
)
1477 void *p
= msg
->front
.iov_base
;
1478 void *const end
= p
+ msg
->front_alloc_len
;
1482 if (req
->r_flags
& CEPH_OSD_FLAG_WRITE
) {
1483 /* snapshots aren't writeable */
1484 WARN_ON(req
->r_snapid
!= CEPH_NOSNAP
);
1486 WARN_ON(req
->r_mtime
.tv_sec
|| req
->r_mtime
.tv_nsec
||
1487 req
->r_data_offset
|| req
->r_snapc
);
1490 setup_request_data(req
, msg
);
1492 ceph_encode_32(&p
, 1); /* client_inc, always 1 */
1493 ceph_encode_32(&p
, req
->r_osdc
->osdmap
->epoch
);
1494 ceph_encode_32(&p
, req
->r_flags
);
1495 ceph_encode_timespec(p
, &req
->r_mtime
);
1496 p
+= sizeof(struct ceph_timespec
);
1497 /* aka reassert_version */
1498 memcpy(p
, &req
->r_replay_version
, sizeof(req
->r_replay_version
));
1499 p
+= sizeof(req
->r_replay_version
);
1502 ceph_start_encoding(&p
, 5, 4,
1503 ceph_oloc_encoding_size(&req
->r_t
.target_oloc
));
1504 ceph_encode_64(&p
, req
->r_t
.target_oloc
.pool
);
1505 ceph_encode_32(&p
, -1); /* preferred */
1506 ceph_encode_32(&p
, 0); /* key len */
1507 if (req
->r_t
.target_oloc
.pool_ns
)
1508 ceph_encode_string(&p
, end
, req
->r_t
.target_oloc
.pool_ns
->str
,
1509 req
->r_t
.target_oloc
.pool_ns
->len
);
1511 ceph_encode_32(&p
, 0);
1514 ceph_encode_8(&p
, 1);
1515 ceph_encode_64(&p
, req
->r_t
.pgid
.pool
);
1516 ceph_encode_32(&p
, req
->r_t
.pgid
.seed
);
1517 ceph_encode_32(&p
, -1); /* preferred */
1520 ceph_encode_32(&p
, req
->r_t
.target_oid
.name_len
);
1521 memcpy(p
, req
->r_t
.target_oid
.name
, req
->r_t
.target_oid
.name_len
);
1522 p
+= req
->r_t
.target_oid
.name_len
;
1524 /* ops, can imply data */
1525 ceph_encode_16(&p
, req
->r_num_ops
);
1526 for (i
= 0; i
< req
->r_num_ops
; i
++) {
1527 data_len
+= osd_req_encode_op(p
, &req
->r_ops
[i
]);
1528 p
+= sizeof(struct ceph_osd_op
);
1531 ceph_encode_64(&p
, req
->r_snapid
); /* snapid */
1533 ceph_encode_64(&p
, req
->r_snapc
->seq
);
1534 ceph_encode_32(&p
, req
->r_snapc
->num_snaps
);
1535 for (i
= 0; i
< req
->r_snapc
->num_snaps
; i
++)
1536 ceph_encode_64(&p
, req
->r_snapc
->snaps
[i
]);
1538 ceph_encode_64(&p
, 0); /* snap_seq */
1539 ceph_encode_32(&p
, 0); /* snaps len */
1542 ceph_encode_32(&p
, req
->r_attempts
); /* retry_attempt */
1545 msg
->front
.iov_len
= p
- msg
->front
.iov_base
;
1546 msg
->hdr
.version
= cpu_to_le16(4); /* MOSDOp v4 */
1547 msg
->hdr
.front_len
= cpu_to_le32(msg
->front
.iov_len
);
1548 msg
->hdr
.data_len
= cpu_to_le32(data_len
);
1550 * The header "data_off" is a hint to the receiver allowing it
1551 * to align received data into its buffers such that there's no
1552 * need to re-copy it before writing it to disk (direct I/O).
1554 msg
->hdr
.data_off
= cpu_to_le16(req
->r_data_offset
);
1556 dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__
,
1557 req
, req
->r_t
.target_oid
.name
, req
->r_t
.target_oid
.name_len
,
1558 msg
->front
.iov_len
, data_len
);
1562 * @req has to be assigned a tid and registered.
1564 static void send_request(struct ceph_osd_request
*req
)
1566 struct ceph_osd
*osd
= req
->r_osd
;
1568 verify_osd_locked(osd
);
1569 WARN_ON(osd
->o_osd
!= req
->r_t
.osd
);
1572 * We may have a previously queued request message hanging
1573 * around. Cancel it to avoid corrupting the msgr.
1576 ceph_msg_revoke(req
->r_request
);
1578 req
->r_flags
|= CEPH_OSD_FLAG_KNOWN_REDIR
;
1579 if (req
->r_attempts
)
1580 req
->r_flags
|= CEPH_OSD_FLAG_RETRY
;
1582 WARN_ON(req
->r_flags
& CEPH_OSD_FLAG_RETRY
);
1584 encode_request(req
, req
->r_request
);
1586 dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
1587 __func__
, req
, req
->r_tid
, req
->r_t
.pgid
.pool
, req
->r_t
.pgid
.seed
,
1588 req
->r_t
.osd
, req
->r_flags
, req
->r_attempts
);
1590 req
->r_t
.paused
= false;
1591 req
->r_stamp
= jiffies
;
1594 req
->r_sent
= osd
->o_incarnation
;
1595 req
->r_request
->hdr
.tid
= cpu_to_le64(req
->r_tid
);
1596 ceph_con_send(&osd
->o_con
, ceph_msg_get(req
->r_request
));
1599 static void maybe_request_map(struct ceph_osd_client
*osdc
)
1601 bool continuous
= false;
1603 verify_osdc_locked(osdc
);
1604 WARN_ON(!osdc
->osdmap
->epoch
);
1606 if (ceph_osdmap_flag(osdc
, CEPH_OSDMAP_FULL
) ||
1607 ceph_osdmap_flag(osdc
, CEPH_OSDMAP_PAUSERD
) ||
1608 ceph_osdmap_flag(osdc
, CEPH_OSDMAP_PAUSEWR
)) {
1609 dout("%s osdc %p continuous\n", __func__
, osdc
);
1612 dout("%s osdc %p onetime\n", __func__
, osdc
);
1615 if (ceph_monc_want_map(&osdc
->client
->monc
, CEPH_SUB_OSDMAP
,
1616 osdc
->osdmap
->epoch
+ 1, continuous
))
1617 ceph_monc_renew_subs(&osdc
->client
->monc
);
1620 static void send_map_check(struct ceph_osd_request
*req
);
1622 static void __submit_request(struct ceph_osd_request
*req
, bool wrlocked
)
1624 struct ceph_osd_client
*osdc
= req
->r_osdc
;
1625 struct ceph_osd
*osd
;
1626 enum calc_target_result ct_res
;
1627 bool need_send
= false;
1628 bool promoted
= false;
1630 WARN_ON(req
->r_tid
|| req
->r_got_reply
);
1631 dout("%s req %p wrlocked %d\n", __func__
, req
, wrlocked
);
1634 ct_res
= calc_target(osdc
, &req
->r_t
, &req
->r_last_force_resend
, false);
1635 if (ct_res
== CALC_TARGET_POOL_DNE
&& !wrlocked
)
1638 osd
= lookup_create_osd(osdc
, req
->r_t
.osd
, wrlocked
);
1640 WARN_ON(PTR_ERR(osd
) != -EAGAIN
|| wrlocked
);
1644 if ((req
->r_flags
& CEPH_OSD_FLAG_WRITE
) &&
1645 ceph_osdmap_flag(osdc
, CEPH_OSDMAP_PAUSEWR
)) {
1646 dout("req %p pausewr\n", req
);
1647 req
->r_t
.paused
= true;
1648 maybe_request_map(osdc
);
1649 } else if ((req
->r_flags
& CEPH_OSD_FLAG_READ
) &&
1650 ceph_osdmap_flag(osdc
, CEPH_OSDMAP_PAUSERD
)) {
1651 dout("req %p pauserd\n", req
);
1652 req
->r_t
.paused
= true;
1653 maybe_request_map(osdc
);
1654 } else if ((req
->r_flags
& CEPH_OSD_FLAG_WRITE
) &&
1655 !(req
->r_flags
& (CEPH_OSD_FLAG_FULL_TRY
|
1656 CEPH_OSD_FLAG_FULL_FORCE
)) &&
1657 (ceph_osdmap_flag(osdc
, CEPH_OSDMAP_FULL
) ||
1658 pool_full(osdc
, req
->r_t
.base_oloc
.pool
))) {
1659 dout("req %p full/pool_full\n", req
);
1660 pr_warn_ratelimited("FULL or reached pool quota\n");
1661 req
->r_t
.paused
= true;
1662 maybe_request_map(osdc
);
1663 } else if (!osd_homeless(osd
)) {
1666 maybe_request_map(osdc
);
1669 mutex_lock(&osd
->lock
);
1671 * Assign the tid atomically with send_request() to protect
1672 * multiple writes to the same object from racing with each
1673 * other, resulting in out of order ops on the OSDs.
1675 req
->r_tid
= atomic64_inc_return(&osdc
->last_tid
);
1676 link_request(osd
, req
);
1679 mutex_unlock(&osd
->lock
);
1681 if (ct_res
== CALC_TARGET_POOL_DNE
)
1682 send_map_check(req
);
1685 downgrade_write(&osdc
->lock
);
1689 up_read(&osdc
->lock
);
1690 down_write(&osdc
->lock
);
1696 static void account_request(struct ceph_osd_request
*req
)
1698 unsigned int mask
= CEPH_OSD_FLAG_ACK
| CEPH_OSD_FLAG_ONDISK
;
1700 if (req
->r_flags
& CEPH_OSD_FLAG_READ
) {
1701 WARN_ON(req
->r_flags
& mask
);
1702 req
->r_flags
|= CEPH_OSD_FLAG_ACK
;
1703 } else if (req
->r_flags
& CEPH_OSD_FLAG_WRITE
)
1704 WARN_ON(!(req
->r_flags
& mask
));
1708 WARN_ON(req
->r_unsafe_callback
&& (req
->r_flags
& mask
) != mask
);
1709 atomic_inc(&req
->r_osdc
->num_requests
);
1712 static void submit_request(struct ceph_osd_request
*req
, bool wrlocked
)
1714 ceph_osdc_get_request(req
);
1715 account_request(req
);
1716 __submit_request(req
, wrlocked
);
1719 static void __finish_request(struct ceph_osd_request
*req
)
1721 struct ceph_osd_client
*osdc
= req
->r_osdc
;
1722 struct ceph_osd
*osd
= req
->r_osd
;
1724 verify_osd_locked(osd
);
1725 dout("%s req %p tid %llu\n", __func__
, req
, req
->r_tid
);
1727 WARN_ON(lookup_request_mc(&osdc
->map_checks
, req
->r_tid
));
1728 unlink_request(osd
, req
);
1729 atomic_dec(&osdc
->num_requests
);
1732 * If an OSD has failed or returned and a request has been sent
1733 * twice, it's possible to get a reply and end up here while the
1734 * request message is queued for delivery. We will ignore the
1735 * reply, so not a big deal, but better to try and catch it.
1737 ceph_msg_revoke(req
->r_request
);
1738 ceph_msg_revoke_incoming(req
->r_reply
);
1741 static void finish_request(struct ceph_osd_request
*req
)
1743 __finish_request(req
);
1744 ceph_osdc_put_request(req
);
1747 static void __complete_request(struct ceph_osd_request
*req
)
1749 if (req
->r_callback
)
1750 req
->r_callback(req
);
1752 complete_all(&req
->r_completion
);
1756 * Note that this is open-coded in handle_reply(), which has to deal
1757 * with ack vs commit, dup acks, etc.
1759 static void complete_request(struct ceph_osd_request
*req
, int err
)
1761 dout("%s req %p tid %llu err %d\n", __func__
, req
, req
->r_tid
, err
);
1763 req
->r_result
= err
;
1764 __finish_request(req
);
1765 __complete_request(req
);
1766 complete_all(&req
->r_safe_completion
);
1767 ceph_osdc_put_request(req
);
1770 static void cancel_map_check(struct ceph_osd_request
*req
)
1772 struct ceph_osd_client
*osdc
= req
->r_osdc
;
1773 struct ceph_osd_request
*lookup_req
;
1775 verify_osdc_wrlocked(osdc
);
1777 lookup_req
= lookup_request_mc(&osdc
->map_checks
, req
->r_tid
);
1781 WARN_ON(lookup_req
!= req
);
1782 erase_request_mc(&osdc
->map_checks
, req
);
1783 ceph_osdc_put_request(req
);
1786 static void cancel_request(struct ceph_osd_request
*req
)
1788 dout("%s req %p tid %llu\n", __func__
, req
, req
->r_tid
);
1790 cancel_map_check(req
);
1791 finish_request(req
);
1794 static void check_pool_dne(struct ceph_osd_request
*req
)
1796 struct ceph_osd_client
*osdc
= req
->r_osdc
;
1797 struct ceph_osdmap
*map
= osdc
->osdmap
;
1799 verify_osdc_wrlocked(osdc
);
1800 WARN_ON(!map
->epoch
);
1802 if (req
->r_attempts
) {
1804 * We sent a request earlier, which means that
1805 * previously the pool existed, and now it does not
1806 * (i.e., it was deleted).
1808 req
->r_map_dne_bound
= map
->epoch
;
1809 dout("%s req %p tid %llu pool disappeared\n", __func__
, req
,
1812 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__
,
1813 req
, req
->r_tid
, req
->r_map_dne_bound
, map
->epoch
);
1816 if (req
->r_map_dne_bound
) {
1817 if (map
->epoch
>= req
->r_map_dne_bound
) {
1818 /* we had a new enough map */
1819 pr_info_ratelimited("tid %llu pool does not exist\n",
1821 complete_request(req
, -ENOENT
);
1824 send_map_check(req
);
1828 static void map_check_cb(struct ceph_mon_generic_request
*greq
)
1830 struct ceph_osd_client
*osdc
= &greq
->monc
->client
->osdc
;
1831 struct ceph_osd_request
*req
;
1832 u64 tid
= greq
->private_data
;
1834 WARN_ON(greq
->result
|| !greq
->u
.newest
);
1836 down_write(&osdc
->lock
);
1837 req
= lookup_request_mc(&osdc
->map_checks
, tid
);
1839 dout("%s tid %llu dne\n", __func__
, tid
);
1843 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__
,
1844 req
, req
->r_tid
, req
->r_map_dne_bound
, greq
->u
.newest
);
1845 if (!req
->r_map_dne_bound
)
1846 req
->r_map_dne_bound
= greq
->u
.newest
;
1847 erase_request_mc(&osdc
->map_checks
, req
);
1848 check_pool_dne(req
);
1850 ceph_osdc_put_request(req
);
1852 up_write(&osdc
->lock
);
1855 static void send_map_check(struct ceph_osd_request
*req
)
1857 struct ceph_osd_client
*osdc
= req
->r_osdc
;
1858 struct ceph_osd_request
*lookup_req
;
1861 verify_osdc_wrlocked(osdc
);
1863 lookup_req
= lookup_request_mc(&osdc
->map_checks
, req
->r_tid
);
1865 WARN_ON(lookup_req
!= req
);
1869 ceph_osdc_get_request(req
);
1870 insert_request_mc(&osdc
->map_checks
, req
);
1871 ret
= ceph_monc_get_version_async(&osdc
->client
->monc
, "osdmap",
1872 map_check_cb
, req
->r_tid
);
/*
 * lingering requests, watch/notify v2 infrastructure
 */
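/*
 * Informal overview: a linger request represents a watch or notify that
 * must survive osdmap changes and OSD session resets.  lreq->reg_req
 * (re)registers it on the OSD and lreq->ping_req keeps the watch alive;
 * both are ordinary ceph_osd_requests that are reinitialized and re-sent
 * whenever the target moves.
 */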
1879 static void linger_release(struct kref
*kref
)
1881 struct ceph_osd_linger_request
*lreq
=
1882 container_of(kref
, struct ceph_osd_linger_request
, kref
);
1884 dout("%s lreq %p reg_req %p ping_req %p\n", __func__
, lreq
,
1885 lreq
->reg_req
, lreq
->ping_req
);
1886 WARN_ON(!RB_EMPTY_NODE(&lreq
->node
));
1887 WARN_ON(!RB_EMPTY_NODE(&lreq
->osdc_node
));
1888 WARN_ON(!RB_EMPTY_NODE(&lreq
->mc_node
));
1889 WARN_ON(!list_empty(&lreq
->scan_item
));
1890 WARN_ON(!list_empty(&lreq
->pending_lworks
));
1894 ceph_osdc_put_request(lreq
->reg_req
);
1896 ceph_osdc_put_request(lreq
->ping_req
);
1897 target_destroy(&lreq
->t
);
1901 static void linger_put(struct ceph_osd_linger_request
*lreq
)
1904 kref_put(&lreq
->kref
, linger_release
);
1907 static struct ceph_osd_linger_request
*
1908 linger_get(struct ceph_osd_linger_request
*lreq
)
1910 kref_get(&lreq
->kref
);
1914 static struct ceph_osd_linger_request
*
1915 linger_alloc(struct ceph_osd_client
*osdc
)
1917 struct ceph_osd_linger_request
*lreq
;
1919 lreq
= kzalloc(sizeof(*lreq
), GFP_NOIO
);
1923 kref_init(&lreq
->kref
);
1924 mutex_init(&lreq
->lock
);
1925 RB_CLEAR_NODE(&lreq
->node
);
1926 RB_CLEAR_NODE(&lreq
->osdc_node
);
1927 RB_CLEAR_NODE(&lreq
->mc_node
);
1928 INIT_LIST_HEAD(&lreq
->scan_item
);
1929 INIT_LIST_HEAD(&lreq
->pending_lworks
);
1930 init_completion(&lreq
->reg_commit_wait
);
1931 init_completion(&lreq
->notify_finish_wait
);
1934 target_init(&lreq
->t
);
1936 dout("%s lreq %p\n", __func__
, lreq
);
1940 DEFINE_RB_INSDEL_FUNCS(linger
, struct ceph_osd_linger_request
, linger_id
, node
)
1941 DEFINE_RB_FUNCS(linger_osdc
, struct ceph_osd_linger_request
, linger_id
, osdc_node
)
1942 DEFINE_RB_FUNCS(linger_mc
, struct ceph_osd_linger_request
, linger_id
, mc_node
)
1945 * Create linger request <-> OSD session relation.
1947 * @lreq has to be registered, @osd may be homeless.
1949 static void link_linger(struct ceph_osd
*osd
,
1950 struct ceph_osd_linger_request
*lreq
)
1952 verify_osd_locked(osd
);
1953 WARN_ON(!lreq
->linger_id
|| lreq
->osd
);
1954 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__
, osd
,
1955 osd
->o_osd
, lreq
, lreq
->linger_id
);
1957 if (!osd_homeless(osd
))
1958 __remove_osd_from_lru(osd
);
1960 atomic_inc(&osd
->o_osdc
->num_homeless
);
1963 insert_linger(&osd
->o_linger_requests
, lreq
);
1967 static void unlink_linger(struct ceph_osd
*osd
,
1968 struct ceph_osd_linger_request
*lreq
)
1970 verify_osd_locked(osd
);
1971 WARN_ON(lreq
->osd
!= osd
);
1972 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__
, osd
,
1973 osd
->o_osd
, lreq
, lreq
->linger_id
);
1976 erase_linger(&osd
->o_linger_requests
, lreq
);
1979 if (!osd_homeless(osd
))
1980 maybe_move_osd_to_lru(osd
);
1982 atomic_dec(&osd
->o_osdc
->num_homeless
);
1985 static bool __linger_registered(struct ceph_osd_linger_request
*lreq
)
1987 verify_osdc_locked(lreq
->osdc
);
1989 return !RB_EMPTY_NODE(&lreq
->osdc_node
);
1992 static bool linger_registered(struct ceph_osd_linger_request
*lreq
)
1994 struct ceph_osd_client
*osdc
= lreq
->osdc
;
1997 down_read(&osdc
->lock
);
1998 registered
= __linger_registered(lreq
);
1999 up_read(&osdc
->lock
);
2004 static void linger_register(struct ceph_osd_linger_request
*lreq
)
2006 struct ceph_osd_client
*osdc
= lreq
->osdc
;
2008 verify_osdc_wrlocked(osdc
);
2009 WARN_ON(lreq
->linger_id
);
2012 lreq
->linger_id
= ++osdc
->last_linger_id
;
2013 insert_linger_osdc(&osdc
->linger_requests
, lreq
);
2016 static void linger_unregister(struct ceph_osd_linger_request
*lreq
)
2018 struct ceph_osd_client
*osdc
= lreq
->osdc
;
2020 verify_osdc_wrlocked(osdc
);
2022 erase_linger_osdc(&osdc
->linger_requests
, lreq
);
2026 static void cancel_linger_request(struct ceph_osd_request
*req
)
2028 struct ceph_osd_linger_request
*lreq
= req
->r_priv
;
2030 WARN_ON(!req
->r_linger
);
2031 cancel_request(req
);
2035 struct linger_work
{
2036 struct work_struct work
;
2037 struct ceph_osd_linger_request
*lreq
;
2038 struct list_head pending_item
;
2039 unsigned long queued_stamp
;
2045 void *payload
; /* points into @msg front */
2048 struct ceph_msg
*msg
; /* for ceph_msg_put() */
2056 static struct linger_work
*lwork_alloc(struct ceph_osd_linger_request
*lreq
,
2059 struct linger_work
*lwork
;
2061 lwork
= kzalloc(sizeof(*lwork
), GFP_NOIO
);
2065 INIT_WORK(&lwork
->work
, workfn
);
2066 INIT_LIST_HEAD(&lwork
->pending_item
);
2067 lwork
->lreq
= linger_get(lreq
);
2072 static void lwork_free(struct linger_work
*lwork
)
2074 struct ceph_osd_linger_request
*lreq
= lwork
->lreq
;
2076 mutex_lock(&lreq
->lock
);
2077 list_del(&lwork
->pending_item
);
2078 mutex_unlock(&lreq
->lock
);
2084 static void lwork_queue(struct linger_work
*lwork
)
2086 struct ceph_osd_linger_request
*lreq
= lwork
->lreq
;
2087 struct ceph_osd_client
*osdc
= lreq
->osdc
;
2089 verify_lreq_locked(lreq
);
2090 WARN_ON(!list_empty(&lwork
->pending_item
));
2092 lwork
->queued_stamp
= jiffies
;
2093 list_add_tail(&lwork
->pending_item
, &lreq
->pending_lworks
);
2094 queue_work(osdc
->notify_wq
, &lwork
->work
);

static void do_watch_notify(struct work_struct *w)
{
        struct linger_work *lwork = container_of(w, struct linger_work, work);
        struct ceph_osd_linger_request *lreq = lwork->lreq;

        if (!linger_registered(lreq)) {
                dout("%s lreq %p not registered\n", __func__, lreq);
                goto out;
        }

        WARN_ON(!lreq->is_watch);
        dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
             __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
             lwork->notify.payload_len);
        lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
                  lwork->notify.notifier_id, lwork->notify.payload,
                  lwork->notify.payload_len);

out:
        ceph_msg_put(lwork->notify.msg);
        lwork_free(lwork);
}

static void do_watch_error(struct work_struct *w)
{
        struct linger_work *lwork = container_of(w, struct linger_work, work);
        struct ceph_osd_linger_request *lreq = lwork->lreq;

        if (!linger_registered(lreq)) {
                dout("%s lreq %p not registered\n", __func__, lreq);
                goto out;
        }

        dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
        lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);

out:
        lwork_free(lwork);
}

static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{
        struct linger_work *lwork;

        lwork = lwork_alloc(lreq, do_watch_error);
        if (!lwork) {
                pr_err("failed to allocate error-lwork\n");
                return;
        }

        lwork->error.err = lreq->last_error;
        lwork_queue(lwork);
}

static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
                                       int result)
{
        if (!completion_done(&lreq->reg_commit_wait)) {
                lreq->reg_commit_error = (result <= 0 ? result : 0);
                complete_all(&lreq->reg_commit_wait);
        }
}

static void linger_commit_cb(struct ceph_osd_request *req)
{
        struct ceph_osd_linger_request *lreq = req->r_priv;

        mutex_lock(&lreq->lock);
        dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
             lreq->linger_id, req->r_result);
        WARN_ON(!__linger_registered(lreq));
        linger_reg_commit_complete(lreq, req->r_result);
        lreq->committed = true;

        if (!lreq->is_watch) {
                struct ceph_osd_data *osd_data =
                    osd_req_op_data(req, 0, notify, response_data);
                void *p = page_address(osd_data->pages[0]);

                WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
                        osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);

                /* make note of the notify_id */
                if (req->r_ops[0].outdata_len >= sizeof(u64)) {
                        lreq->notify_id = ceph_decode_64(&p);
                        dout("lreq %p notify_id %llu\n", lreq,
                             lreq->notify_id);
                } else {
                        dout("lreq %p no notify_id\n", lreq);
                }
        }

        mutex_unlock(&lreq->lock);
        linger_put(lreq);
}

static int normalize_watch_error(int err)
{
        /*
         * Translate ENOENT -> ENOTCONN so that a delete->disconnection
         * notification and a failure to reconnect because we raced with
         * the delete appear the same to the user.
         */
        if (err == -ENOENT)
                err = -ENOTCONN;

        return err;
}

static void linger_reconnect_cb(struct ceph_osd_request *req)
{
        struct ceph_osd_linger_request *lreq = req->r_priv;

        mutex_lock(&lreq->lock);
        dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
             lreq, lreq->linger_id, req->r_result, lreq->last_error);
        if (req->r_result < 0) {
                if (!lreq->last_error) {
                        lreq->last_error = normalize_watch_error(req->r_result);
                        queue_watch_error(lreq);
                }
        }

        mutex_unlock(&lreq->lock);
        linger_put(lreq);
}
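
/*
 * (Re)send the registration request for a linger request.  If the
 * watch has already been committed this becomes a
 * CEPH_OSD_WATCH_OP_RECONNECT with a bumped register_gen; otherwise the
 * original WATCH (or NOTIFY) op is submitted and completion is reported
 * through linger_commit_cb().
 */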
static void send_linger(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_request *req = lreq->reg_req;
        struct ceph_osd_req_op *op = &req->r_ops[0];

        verify_osdc_wrlocked(req->r_osdc);
        dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);

        if (req->r_osd)
                cancel_linger_request(req);

        request_reinit(req);
        ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
        ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
        req->r_flags = lreq->t.flags;
        req->r_mtime = lreq->mtime;

        mutex_lock(&lreq->lock);
        if (lreq->is_watch && lreq->committed) {
                WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
                        op->watch.cookie != lreq->linger_id);
                op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
                op->watch.gen = ++lreq->register_gen;
                dout("lreq %p reconnect register_gen %u\n", lreq,
                     op->watch.gen);
                req->r_callback = linger_reconnect_cb;
        } else {
                if (!lreq->is_watch)
                        lreq->notify_id = 0;
                else
                        WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
                dout("lreq %p register\n", lreq);
                req->r_callback = linger_commit_cb;
        }
        mutex_unlock(&lreq->lock);

        req->r_priv = linger_get(lreq);
        req->r_linger = true;

        submit_request(req, true);
}

static void linger_ping_cb(struct ceph_osd_request *req)
{
        struct ceph_osd_linger_request *lreq = req->r_priv;

        mutex_lock(&lreq->lock);
        dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
             __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
             lreq->last_error);
        if (lreq->register_gen == req->r_ops[0].watch.gen) {
                if (!req->r_result) {
                        lreq->watch_valid_thru = lreq->ping_sent;
                } else if (!lreq->last_error) {
                        lreq->last_error = normalize_watch_error(req->r_result);
                        queue_watch_error(lreq);
                }
        } else {
                dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
                     lreq->register_gen, req->r_ops[0].watch.gen);
        }

        mutex_unlock(&lreq->lock);
        linger_put(lreq);
}
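
/*
 * Send a CEPH_OSD_WATCH_OP_PING on the ping_req to verify that the
 * watch is still connected.  Pings are skipped while the cluster has
 * PAUSERD set; stale pongs (older register_gen) are ignored in
 * linger_ping_cb().
 */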
static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_client *osdc = lreq->osdc;
        struct ceph_osd_request *req = lreq->ping_req;
        struct ceph_osd_req_op *op = &req->r_ops[0];

        if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
                dout("%s PAUSERD\n", __func__);
                return;
        }

        lreq->ping_sent = jiffies;
        dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
             __func__, lreq, lreq->linger_id, lreq->ping_sent,
             lreq->register_gen);

        if (req->r_osd)
                cancel_linger_request(req);

        request_reinit(req);
        target_copy(&req->r_t, &lreq->t);

        WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
                op->watch.cookie != lreq->linger_id ||
                op->watch.op != CEPH_OSD_WATCH_OP_PING);
        op->watch.gen = lreq->register_gen;
        req->r_callback = linger_ping_cb;
        req->r_priv = linger_get(lreq);
        req->r_linger = true;

        ceph_osdc_get_request(req);
        account_request(req);
        req->r_tid = atomic64_inc_return(&osdc->last_tid);
        link_request(lreq->osd, req);
        send_request(req);
}

static void linger_submit(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_client *osdc = lreq->osdc;
        struct ceph_osd *osd;

        calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
        osd = lookup_create_osd(osdc, lreq->t.osd, true);
        link_linger(osd, lreq);

        send_linger(lreq);
}

static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_client *osdc = lreq->osdc;
        struct ceph_osd_linger_request *lookup_lreq;

        verify_osdc_wrlocked(osdc);

        lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
                                       lreq->linger_id);
        if (!lookup_lreq)
                return;

        WARN_ON(lookup_lreq != lreq);
        erase_linger_mc(&osdc->linger_map_checks, lreq);
        linger_put(lreq);
}

/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
        if (lreq->is_watch && lreq->ping_req->r_osd)
                cancel_linger_request(lreq->ping_req);
        if (lreq->reg_req->r_osd)
                cancel_linger_request(lreq->reg_req);
        cancel_linger_map_check(lreq);
        unlink_linger(lreq->osd, lreq);
        linger_unregister(lreq);
}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_client *osdc = lreq->osdc;

        down_write(&osdc->lock);
        if (__linger_registered(lreq))
                __linger_cancel(lreq);
        up_write(&osdc->lock);
}
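
/*
 * Handle the "pool does not exist" case for a linger request: once the
 * osdmap is at least as new as map_dne_bound, fail the registration
 * with -ENOENT and cancel the request; otherwise ask the monitor for
 * the newest osdmap epoch via send_linger_map_check().
 */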
static void send_linger_map_check(struct ceph_osd_linger_request *lreq);

static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_client *osdc = lreq->osdc;
        struct ceph_osdmap *map = osdc->osdmap;

        verify_osdc_wrlocked(osdc);
        WARN_ON(!map->epoch);

        if (lreq->register_gen) {
                lreq->map_dne_bound = map->epoch;
                dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
                     lreq, lreq->linger_id);
        } else {
                dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
                     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
                     map->epoch);
        }

        if (lreq->map_dne_bound) {
                if (map->epoch >= lreq->map_dne_bound) {
                        /* we had a new enough map */
                        pr_info("linger_id %llu pool does not exist\n",
                                lreq->linger_id);
                        linger_reg_commit_complete(lreq, -ENOENT);
                        __linger_cancel(lreq);
                }
        } else {
                send_linger_map_check(lreq);
        }
}

static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{
        struct ceph_osd_client *osdc = &greq->monc->client->osdc;
        struct ceph_osd_linger_request *lreq;
        u64 linger_id = greq->private_data;

        WARN_ON(greq->result || !greq->u.newest);

        down_write(&osdc->lock);
        lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
        if (!lreq) {
                dout("%s linger_id %llu dne\n", __func__, linger_id);
                goto out_unlock;
        }

        dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
             __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
             greq->u.newest);
        if (!lreq->map_dne_bound)
                lreq->map_dne_bound = greq->u.newest;
        erase_linger_mc(&osdc->linger_map_checks, lreq);
        check_linger_pool_dne(lreq);

        linger_put(lreq);
out_unlock:
        up_write(&osdc->lock);
}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_client *osdc = lreq->osdc;
        struct ceph_osd_linger_request *lookup_lreq;
        int ret;

        verify_osdc_wrlocked(osdc);

        lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
                                       lreq->linger_id);
        if (lookup_lreq) {
                WARN_ON(lookup_lreq != lreq);
                return;
        }

        linger_get(lreq);
        insert_linger_mc(&osdc->linger_map_checks, lreq);
        ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
                                          linger_map_check_cb, lreq->linger_id);
        WARN_ON(ret);
}

static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
        int ret;

        dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
        ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
        return ret ?: lreq->reg_commit_error;
}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
{
        int ret;

        dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
        ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
        return ret ?: lreq->notify_finish_error;
}

/*
 * Timeout callback, called every N seconds.  When 1 or more OSD
 * requests has been active for more than N seconds, we send a keepalive
 * (tag + timestamp) to its OSD to ensure any communications channel
 * reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client, timeout_work.work);
        struct ceph_options *opts = osdc->client->options;
        unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
        LIST_HEAD(slow_osds);
        struct rb_node *n, *p;

        dout("%s osdc %p\n", __func__, osdc);
        down_write(&osdc->lock);

        /*
         * ping osds that are a bit slow.  this ensures that if there
         * is a break in the TCP connection we will notice, and reopen
         * a connection with that osd (from the fault callback).
         */
        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
                bool found = false;

                for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
                        struct ceph_osd_request *req =
                            rb_entry(p, struct ceph_osd_request, r_node);

                        if (time_before(req->r_stamp, cutoff)) {
                                dout(" req %p tid %llu on osd%d is laggy\n",
                                     req, req->r_tid, osd->o_osd);
                                found = true;
                        }
                }

                for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
                        struct ceph_osd_linger_request *lreq =
                            rb_entry(p, struct ceph_osd_linger_request, node);

                        dout(" lreq %p linger_id %llu is served by osd%d\n",
                             lreq, lreq->linger_id, osd->o_osd);
                        found = true;

                        mutex_lock(&lreq->lock);
                        if (lreq->is_watch && lreq->committed && !lreq->last_error)
                                send_linger_ping(lreq);
                        mutex_unlock(&lreq->lock);
                }

                if (found)
                        list_move_tail(&osd->o_keepalive_item, &slow_osds);
        }

        if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
                maybe_request_map(osdc);

        while (!list_empty(&slow_osds)) {
                struct ceph_osd *osd = list_first_entry(&slow_osds,
                                                        struct ceph_osd,
                                                        o_keepalive_item);

                list_del_init(&osd->o_keepalive_item);
                ceph_con_keepalive(&osd->o_con);
        }

        up_write(&osdc->lock);
        schedule_delayed_work(&osdc->timeout_work,
                              osdc->client->options->osd_keepalive_timeout);
}

static void handle_osds_timeout(struct work_struct *work)
{
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client,
                             osds_timeout_work.work);
        unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
        struct ceph_osd *osd, *nosd;

        dout("%s osdc %p\n", __func__, osdc);
        down_write(&osdc->lock);
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
                if (time_before(jiffies, osd->lru_ttl))
                        break;

                WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
                WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
                close_osd(osd);
        }

        up_write(&osdc->lock);
        schedule_delayed_work(&osdc->osds_timeout_work,
                              round_jiffies_relative(delay));
}
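
/*
 * Decoding helpers for MOSDOpReply: object locator and request
 * redirect, as encoded by the OSD.  Anything we can't represent
 * (key, changed namespace, explicit hash) is rejected with -EINVAL.
 */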
static int ceph_oloc_decode(void **p, void *end,
                            struct ceph_object_locator *oloc)
{
        u8 struct_v, struct_cv;
        u32 len;
        void *struct_end;
        int ret = 0;

        ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
        struct_v = ceph_decode_8(p);
        struct_cv = ceph_decode_8(p);
        if (struct_v < 3) {
                pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
                        struct_v, struct_cv);
                goto e_inval;
        }
        if (struct_cv > 6) {
                pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
                        struct_v, struct_cv);
                goto e_inval;
        }
        len = ceph_decode_32(p);
        ceph_decode_need(p, end, len, e_inval);
        struct_end = *p + len;

        oloc->pool = ceph_decode_64(p);
        *p += 4; /* skip preferred */

        len = ceph_decode_32(p);
        if (len > 0) {
                pr_warn("ceph_object_locator::key is set\n");
                goto e_inval;
        }

        if (struct_v >= 5) {
                bool changed = false;

                len = ceph_decode_32(p);
                if (len > 0) {
                        ceph_decode_need(p, end, len, e_inval);
                        if (!oloc->pool_ns ||
                            ceph_compare_string(oloc->pool_ns, *p, len))
                                changed = true;
                        *p += len;
                } else {
                        if (oloc->pool_ns)
                                changed = true;
                }
                if (changed) {
                        /* redirect changes namespace */
                        pr_warn("ceph_object_locator::nspace is changed\n");
                        goto e_inval;
                }
        }

        if (struct_v >= 6) {
                s64 hash = ceph_decode_64(p);

                if (hash != -1) {
                        pr_warn("ceph_object_locator::hash is set\n");
                        goto e_inval;
                }
        }

        /* skip the rest */
        *p = struct_end;
out:
        return ret;

e_inval:
        ret = -EINVAL;
        goto out;
}

static int ceph_redirect_decode(void **p, void *end,
                                struct ceph_request_redirect *redir)
{
        u8 struct_v, struct_cv;
        u32 len;
        void *struct_end;
        int ret;

        ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
        struct_v = ceph_decode_8(p);
        struct_cv = ceph_decode_8(p);
        if (struct_cv > 1) {
                pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
                        struct_v, struct_cv);
                goto e_inval;
        }
        len = ceph_decode_32(p);
        ceph_decode_need(p, end, len, e_inval);
        struct_end = *p + len;

        ret = ceph_oloc_decode(p, end, &redir->oloc);
        if (ret)
                goto out;

        len = ceph_decode_32(p);
        if (len > 0) {
                pr_warn("ceph_request_redirect::object_name is set\n");
                goto e_inval;
        }

        len = ceph_decode_32(p);
        *p += len; /* skip osd_instructions */

        /* skip the rest */
        *p = struct_end;
out:
        return ret;

e_inval:
        ret = -EINVAL;
        goto out;
}

struct MOSDOpReply {
        struct ceph_pg pgid;
        u64 flags;
        int result;
        u32 epoch;
        int num_ops;
        u32 outdata_len[CEPH_OSD_MAX_OPS];
        s32 rval[CEPH_OSD_MAX_OPS];
        int retry_attempt;
        struct ceph_eversion replay_version;
        u64 user_version;
        struct ceph_request_redirect redirect;
};
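
/*
 * Decode an MOSDOpReply message front into struct MOSDOpReply.  Older
 * encodings of the reply lack replay_version/user_version and the
 * redirect, so fallbacks are used for those fields.
 */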
static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
        void *p = msg->front.iov_base;
        void *const end = p + msg->front.iov_len;
        u16 version = le16_to_cpu(msg->hdr.version);
        struct ceph_eversion bad_replay_version;
        u8 decode_redir;
        u32 len;
        int ret;
        int i;

        ceph_decode_32_safe(&p, end, len, e_inval);
        ceph_decode_need(&p, end, len, e_inval);
        p += len; /* skip oid */

        ret = ceph_decode_pgid(&p, end, &m->pgid);
        if (ret)
                return ret;

        ceph_decode_64_safe(&p, end, m->flags, e_inval);
        ceph_decode_32_safe(&p, end, m->result, e_inval);
        ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
        memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
        p += sizeof(bad_replay_version);
        ceph_decode_32_safe(&p, end, m->epoch, e_inval);

        ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
        if (m->num_ops > ARRAY_SIZE(m->outdata_len))
                goto e_inval;

        ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
                         e_inval);
        for (i = 0; i < m->num_ops; i++) {
                struct ceph_osd_op *op = p;

                m->outdata_len[i] = le32_to_cpu(op->payload_len);
                p += sizeof(*op);
        }

        ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
        for (i = 0; i < m->num_ops; i++)
                ceph_decode_32_safe(&p, end, m->rval[i], e_inval);

        if (version >= 5) {
                ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
                memcpy(&m->replay_version, p, sizeof(m->replay_version));
                p += sizeof(m->replay_version);
                ceph_decode_64_safe(&p, end, m->user_version, e_inval);
        } else {
                m->replay_version = bad_replay_version; /* struct */
                m->user_version = le64_to_cpu(m->replay_version.version);
        }

        if (version >= 6) {
                if (version >= 7)
                        ceph_decode_8_safe(&p, end, decode_redir, e_inval);
                else
                        decode_redir = 1;
        } else {
                decode_redir = 0;
        }

        if (decode_redir) {
                ret = ceph_redirect_decode(&p, end, &m->redirect);
                if (ret)
                        return ret;
        } else {
                ceph_oloc_init(&m->redirect.oloc);
        }

        return 0;

e_inval:
        return -EINVAL;
}

/*
 * We are done with @req if
 *   - @m is a safe reply, or
 *   - @m is an unsafe reply and we didn't want a safe one
 */
static bool done_request(const struct ceph_osd_request *req,
                         const struct MOSDOpReply *m)
{
        return (m->result < 0 ||
                (m->flags & CEPH_OSD_FLAG_ONDISK) ||
                !(req->r_flags & CEPH_OSD_FLAG_ONDISK));
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 *
 * ->r_unsafe_callback is set?	yes			no
 *
 * first reply is OK (needed	r_cb/r_completion,	r_cb/r_completion,
 * any or needed/got safe)	r_safe_completion	r_safe_completion
 *
 * first reply is unsafe	r_unsafe_cb(true)	(nothing)
 *
 * when we get the safe reply	r_unsafe_cb(false),	r_cb/r_completion,
 *				r_safe_completion	r_safe_completion
 */
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct ceph_osd_request *req;
        struct MOSDOpReply m;
        u64 tid = le64_to_cpu(msg->hdr.tid);
        u32 data_len = 0;
        bool already_acked;
        int ret;
        int i;

        dout("%s msg %p tid %llu\n", __func__, msg, tid);

        down_read(&osdc->lock);
        if (!osd_registered(osd)) {
                dout("%s osd%d unknown\n", __func__, osd->o_osd);
                goto out_unlock_osdc;
        }
        WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

        mutex_lock(&osd->lock);
        req = lookup_request(&osd->o_requests, tid);
        if (!req) {
                dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
                goto out_unlock_session;
        }

        m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
        ret = decode_MOSDOpReply(msg, &m);
        m.redirect.oloc.pool_ns = NULL;
        if (ret) {
                pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
                       req->r_tid, ret);
                ceph_msg_dump(msg);
                goto fail_request;
        }
        dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
             __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
             m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
             le64_to_cpu(m.replay_version.version), m.user_version);

        if (m.retry_attempt >= 0) {
                if (m.retry_attempt != req->r_attempts - 1) {
                        dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
                             req, req->r_tid, m.retry_attempt,
                             req->r_attempts - 1);
                        goto out_unlock_session;
                }
        } else {
                WARN_ON(1); /* MOSDOpReply v4 is assumed */
        }

        if (!ceph_oloc_empty(&m.redirect.oloc)) {
                dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
                     m.redirect.oloc.pool);
                unlink_request(osd, req);
                mutex_unlock(&osd->lock);

                /*
                 * Not ceph_oloc_copy() - changing pool_ns is not
                 * supported.
                 */
                req->r_t.target_oloc.pool = m.redirect.oloc.pool;
                req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
                req->r_tid = 0;
                __submit_request(req, false);
                goto out_unlock_osdc;
        }

        if (m.num_ops != req->r_num_ops) {
                pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
                       req->r_num_ops, req->r_tid);
                goto fail_request;
        }
        for (i = 0; i < req->r_num_ops; i++) {
                dout(" req %p tid %llu op %d rval %d len %u\n", req,
                     req->r_tid, i, m.rval[i], m.outdata_len[i]);
                req->r_ops[i].rval = m.rval[i];
                req->r_ops[i].outdata_len = m.outdata_len[i];
                data_len += m.outdata_len[i];
        }
        if (data_len != le32_to_cpu(msg->hdr.data_len)) {
                pr_err("sum of lens %u != %u for tid %llu\n", data_len,
                       le32_to_cpu(msg->hdr.data_len), req->r_tid);
                goto fail_request;
        }
        dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
             req, req->r_tid, req->r_got_reply, m.result, data_len);

        already_acked = req->r_got_reply;
        if (!already_acked) {
                req->r_result = m.result ?: data_len;
                req->r_replay_version = m.replay_version; /* struct */
                req->r_got_reply = true;
        } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
                dout("req %p tid %llu dup ack\n", req, req->r_tid);
                goto out_unlock_session;
        }

        if (done_request(req, &m)) {
                __finish_request(req);
                if (req->r_linger) {
                        WARN_ON(req->r_unsafe_callback);
                        dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
                        __complete_request(req);
                }
        }

        mutex_unlock(&osd->lock);
        up_read(&osdc->lock);

        if (done_request(req, &m)) {
                if (already_acked && req->r_unsafe_callback) {
                        dout("req %p tid %llu safe-cb\n", req, req->r_tid);
                        req->r_unsafe_callback(req, false);
                } else if (!req->r_linger) {
                        dout("req %p tid %llu cb\n", req, req->r_tid);
                        __complete_request(req);
                }
                if (m.flags & CEPH_OSD_FLAG_ONDISK)
                        complete_all(&req->r_safe_completion);
                ceph_osdc_put_request(req);
        } else {
                if (req->r_unsafe_callback) {
                        dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
                        req->r_unsafe_callback(req, true);
                }
        }

        return;

fail_request:
        complete_request(req, -EIO);
out_unlock_session:
        mutex_unlock(&osd->lock);
out_unlock_osdc:
        up_read(&osdc->lock);
}

static void set_pool_was_full(struct ceph_osd_client *osdc)
{
        struct rb_node *n;

        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
                struct ceph_pg_pool_info *pi =
                    rb_entry(n, struct ceph_pg_pool_info, node);

                pi->was_full = __pool_full(pi);
        }
}

static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{
        struct ceph_pg_pool_info *pi;

        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
        if (!pi)
                return false;

        return pi->was_full && !__pool_full(pi);
}
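
/*
 * Recompute the target OSD for a linger request after an osdmap change
 * and, if it moved, relink the linger request under the new OSD
 * session.
 */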
static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_client *osdc = lreq->osdc;
        enum calc_target_result ct_res;

        ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
        if (ct_res == CALC_TARGET_NEED_RESEND) {
                struct ceph_osd *osd;

                osd = lookup_create_osd(osdc, lreq->t.osd, true);
                if (osd != lreq->osd) {
                        unlink_linger(lreq->osd, lreq);
                        link_linger(osd, lreq);
                } else {
                        put_osd(osd);
                }
        }

        return ct_res;
}

/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
                          bool force_resend,
                          bool cleared_full,
                          bool check_pool_cleared_full,
                          struct rb_root *need_resend,
                          struct list_head *need_resend_linger)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;
        bool force_resend_writes;

        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);
                enum calc_target_result ct_res;

                n = rb_next(n); /* recalc_linger_target() */

                dout("%s lreq %p linger_id %llu\n", __func__, lreq,
                     lreq->linger_id);
                ct_res = recalc_linger_target(lreq);
                switch (ct_res) {
                case CALC_TARGET_NO_ACTION:
                        force_resend_writes = cleared_full ||
                            (check_pool_cleared_full &&
                             pool_cleared_full(osdc, lreq->t.base_oloc.pool));
                        if (!force_resend && !force_resend_writes)
                                break;

                        /* fall through */
                case CALC_TARGET_NEED_RESEND:
                        cancel_linger_map_check(lreq);
                        /*
                         * scan_requests() for the previous epoch(s)
                         * may have already added it to the list, since
                         * it's not unlinked here.
                         */
                        if (list_empty(&lreq->scan_item))
                                list_add_tail(&lreq->scan_item, need_resend_linger);
                        break;
                case CALC_TARGET_POOL_DNE:
                        check_linger_pool_dne(lreq);
                        break;
                }
        }

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);
                enum calc_target_result ct_res;

                n = rb_next(n); /* unlink_request(), check_pool_dne() */

                dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
                ct_res = calc_target(osdc, &req->r_t,
                                     &req->r_last_force_resend, false);
                switch (ct_res) {
                case CALC_TARGET_NO_ACTION:
                        force_resend_writes = cleared_full ||
                            (check_pool_cleared_full &&
                             pool_cleared_full(osdc, req->r_t.base_oloc.pool));
                        if (!force_resend &&
                            (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
                             !force_resend_writes))
                                break;

                        /* fall through */
                case CALC_TARGET_NEED_RESEND:
                        cancel_map_check(req);
                        unlink_request(osd, req);
                        insert_request(need_resend, req);
                        break;
                case CALC_TARGET_POOL_DNE:
                        check_pool_dne(req);
                        break;
                }
        }
}

static int handle_one_map(struct ceph_osd_client *osdc,
                          void *p, void *end, bool incremental,
                          struct rb_root *need_resend,
                          struct list_head *need_resend_linger)
{
        struct ceph_osdmap *newmap;
        struct rb_node *n;
        bool skipped_map = false;
        bool was_full;

        was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
        set_pool_was_full(osdc);

        if (incremental)
                newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
        else
                newmap = ceph_osdmap_decode(&p, end);
        if (IS_ERR(newmap))
                return PTR_ERR(newmap);

        if (newmap != osdc->osdmap) {
                /*
                 * Preserve ->was_full before destroying the old map.
                 * For pools that weren't in the old map, ->was_full
                 * should be false.
                 */
                for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
                        struct ceph_pg_pool_info *pi =
                            rb_entry(n, struct ceph_pg_pool_info, node);
                        struct ceph_pg_pool_info *old_pi;

                        old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
                        if (old_pi)
                                pi->was_full = old_pi->was_full;
                        else
                                WARN_ON(pi->was_full);
                }

                if (osdc->osdmap->epoch &&
                    osdc->osdmap->epoch + 1 < newmap->epoch) {
                        WARN_ON(incremental);
                        skipped_map = true;
                }

                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = newmap;
        }

        was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
        scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
                      need_resend, need_resend_linger);

        for (n = rb_first(&osdc->osds); n; ) {
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

                n = rb_next(n); /* close_osd() */

                scan_requests(osd, skipped_map, was_full, true, need_resend,
                              need_resend_linger);
                if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
                    memcmp(&osd->o_con.peer_addr,
                           ceph_osd_addr(osdc->osdmap, osd->o_osd),
                           sizeof(struct ceph_entity_addr)))
                        close_osd(osd);
        }

        return 0;
}
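
/*
 * Resend requests and linger requests collected by scan_requests()
 * after a map change: each request is retargeted, linked to its
 * (possibly new) OSD session and sent, unless the target is homeless
 * or paused.
 */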
static void kick_requests(struct ceph_osd_client *osdc,
                          struct rb_root *need_resend,
                          struct list_head *need_resend_linger)
{
        struct ceph_osd_linger_request *lreq, *nlreq;
        struct rb_node *n;

        for (n = rb_first(need_resend); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);
                struct ceph_osd *osd;

                n = rb_next(n);
                erase_request(need_resend, req); /* before link_request() */

                WARN_ON(req->r_osd);
                calc_target(osdc, &req->r_t, NULL, false);
                osd = lookup_create_osd(osdc, req->r_t.osd, true);
                link_request(osd, req);
                if (!req->r_linger) {
                        if (!osd_homeless(osd) && !req->r_t.paused)
                                send_request(req);
                } else {
                        cancel_linger_request(req);
                }
        }

        list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
                if (!osd_homeless(lreq->osd))
                        send_linger(lreq);

                list_del_init(&lreq->scan_item);
        }
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
        void *p = msg->front.iov_base;
        void *const end = p + msg->front.iov_len;
        u32 nr_maps, maplen;
        u32 epoch;
        struct ceph_fsid fsid;
        struct rb_root need_resend = RB_ROOT;
        LIST_HEAD(need_resend_linger);
        bool handled_incremental = false;
        bool was_pauserd, was_pausewr;
        bool pauserd, pausewr;
        int err;

        dout("%s have %u\n", __func__, osdc->osdmap->epoch);
        down_write(&osdc->lock);

        /* verify fsid */
        ceph_decode_need(&p, end, sizeof(fsid), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
        if (ceph_check_fsid(osdc->client, &fsid) < 0)
                goto bad;

        was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
        was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
                      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                      have_pool_full(osdc);

        /* incremental maps */
        ceph_decode_32_safe(&p, end, nr_maps, bad);
        dout(" %d inc maps\n", nr_maps);
        while (nr_maps > 0) {
                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
                epoch = ceph_decode_32(&p);
                maplen = ceph_decode_32(&p);
                ceph_decode_need(&p, end, maplen, bad);
                if (osdc->osdmap->epoch &&
                    osdc->osdmap->epoch + 1 == epoch) {
                        dout("applying incremental map %u len %d\n",
                             epoch, maplen);
                        err = handle_one_map(osdc, p, p + maplen, true,
                                             &need_resend, &need_resend_linger);
                        if (err)
                                goto bad;
                        handled_incremental = true;
                } else {
                        dout("ignoring incremental map %u len %d\n",
                             epoch, maplen);
                }
                p += maplen;
                nr_maps--;
        }
        if (handled_incremental)
                goto done;

        /* full maps */
        ceph_decode_32_safe(&p, end, nr_maps, bad);
        dout(" %d full maps\n", nr_maps);
        while (nr_maps) {
                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
                epoch = ceph_decode_32(&p);
                maplen = ceph_decode_32(&p);
                ceph_decode_need(&p, end, maplen, bad);
                if (nr_maps > 1) {
                        dout("skipping non-latest full map %u len %d\n",
                             epoch, maplen);
                } else if (osdc->osdmap->epoch >= epoch) {
                        dout("skipping full map %u len %d, "
                             "older than our %u\n", epoch, maplen,
                             osdc->osdmap->epoch);
                } else {
                        dout("taking full map %u len %d\n", epoch, maplen);
                        err = handle_one_map(osdc, p, p + maplen, false,
                                             &need_resend, &need_resend_linger);
                        if (err)
                                goto bad;
                }
                p += maplen;
                nr_maps--;
        }

done:
        /*
         * subscribe to subsequent osdmap updates if full to ensure
         * we find out when we are no longer full and stop returning
         * ENOSPC.
         */
        pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
        pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
                  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                  have_pool_full(osdc);
        if (was_pauserd || was_pausewr || pauserd || pausewr)
                maybe_request_map(osdc);

        kick_requests(osdc, &need_resend, &need_resend_linger);

        ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
                          osdc->osdmap->epoch);
        up_write(&osdc->lock);
        wake_up_all(&osdc->client->auth_wq);
        return;

bad:
        pr_err("osdc handle_map corrupt msg\n");
        ceph_msg_dump(msg);
        up_write(&osdc->lock);
}

/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
        struct rb_node *n;

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* cancel_linger_request() */

                if (!req->r_linger) {
                        if (!req->r_t.paused)
                                send_request(req);
                } else {
                        cancel_linger_request(req);
                }
        }
        for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                send_linger(lreq);
        }
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        down_write(&osdc->lock);
        if (!osd_registered(osd)) {
                dout("%s osd%d unknown\n", __func__, osd->o_osd);
                goto out_unlock;
        }

        if (!reopen_osd(osd))
                kick_osd_requests(osd);
        maybe_request_map(osdc);

out_unlock:
        up_write(&osdc->lock);
}
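
/*
 * A CEPH_MSG_WATCH_NOTIFY message carries one of three events:
 * DISCONNECT (watch error), NOTIFY_COMPLETE (answer for a notify we
 * initiated) or NOTIFY (an event for a watch we hold, queued to the
 * notify workqueue).
 */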
/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
                                struct ceph_msg *msg)
{
        void *p = msg->front.iov_base;
        void *const end = p + msg->front.iov_len;
        struct ceph_osd_linger_request *lreq;
        struct linger_work *lwork;
        u8 proto_ver, opcode;
        u64 cookie, notify_id;
        u64 notifier_id = 0;
        s32 return_code = 0;
        void *payload = NULL;
        u32 payload_len = 0;

        ceph_decode_8_safe(&p, end, proto_ver, bad);
        ceph_decode_8_safe(&p, end, opcode, bad);
        ceph_decode_64_safe(&p, end, cookie, bad);
        p += 8; /* skip ver */
        ceph_decode_64_safe(&p, end, notify_id, bad);

        if (proto_ver >= 1) {
                ceph_decode_32_safe(&p, end, payload_len, bad);
                ceph_decode_need(&p, end, payload_len, bad);
                payload = p;
                p += payload_len;
        }

        if (le16_to_cpu(msg->hdr.version) >= 2)
                ceph_decode_32_safe(&p, end, return_code, bad);

        if (le16_to_cpu(msg->hdr.version) >= 3)
                ceph_decode_64_safe(&p, end, notifier_id, bad);

        down_read(&osdc->lock);
        lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
        if (!lreq) {
                dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
                     cookie);
                goto out_unlock_osdc;
        }

        mutex_lock(&lreq->lock);
        dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
             opcode, cookie, lreq, lreq->is_watch);
        if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
                if (!lreq->last_error) {
                        lreq->last_error = -ENOTCONN;
                        queue_watch_error(lreq);
                }
        } else if (!lreq->is_watch) {
                /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
                if (lreq->notify_id && lreq->notify_id != notify_id) {
                        dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
                             lreq->notify_id, notify_id);
                } else if (!completion_done(&lreq->notify_finish_wait)) {
                        struct ceph_msg_data *data =
                            list_first_entry_or_null(&msg->data,
                                                     struct ceph_msg_data,
                                                     links);

                        if (data) {
                                if (lreq->preply_pages) {
                                        WARN_ON(data->type !=
                                                        CEPH_MSG_DATA_PAGES);
                                        *lreq->preply_pages = data->pages;
                                        *lreq->preply_len = data->length;
                                } else {
                                        ceph_release_page_vector(data->pages,
                                               calc_pages_for(0, data->length));
                                }
                        }
                        lreq->notify_finish_error = return_code;
                        complete_all(&lreq->notify_finish_wait);
                }
        } else {
                /* CEPH_WATCH_EVENT_NOTIFY */
                lwork = lwork_alloc(lreq, do_watch_notify);
                if (!lwork) {
                        pr_err("failed to allocate notify-lwork\n");
                        goto out_unlock_lreq;
                }

                lwork->notify.notify_id = notify_id;
                lwork->notify.notifier_id = notifier_id;
                lwork->notify.payload = payload;
                lwork->notify.payload_len = payload_len;
                lwork->notify.msg = ceph_msg_get(msg);
                lwork_queue(lwork);
        }

out_unlock_lreq:
        mutex_unlock(&lreq->lock);
out_unlock_osdc:
        up_read(&osdc->lock);
        return;

bad:
        pr_err("osdc handle_watch_notify corrupt msg\n");
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
                            struct ceph_osd_request *req,
                            bool nofail)
{
        down_read(&osdc->lock);
        submit_request(req, false);
        up_read(&osdc->lock);

        return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request.  The request is not completed (i.e.
 * no callbacks or wakeups) - higher layers are supposed to know what
 * they are canceling.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;

        down_write(&osdc->lock);
        if (req->r_osd)
                cancel_request(req);
        up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
                                unsigned long timeout)
{
        long left;

        dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
        left = wait_for_completion_killable_timeout(&req->r_completion,
                                            ceph_timeout_jiffies(timeout));
        if (left <= 0) {
                left = left ?: -ETIMEDOUT;
                ceph_osdc_cancel_request(req);

                /* kludge - need to wake ceph_osdc_sync() */
                complete_all(&req->r_safe_completion);
        } else {
                left = req->r_result; /* completed */
        }

        return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
{
        return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
        struct rb_node *n, *p;
        u64 last_tid = atomic64_read(&osdc->last_tid);

again:
        down_read(&osdc->lock);
        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

                mutex_lock(&osd->lock);
                for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
                        struct ceph_osd_request *req =
                            rb_entry(p, struct ceph_osd_request, r_node);

                        if (req->r_tid > last_tid)
                                break;

                        if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
                                continue;

                        ceph_osdc_get_request(req);
                        mutex_unlock(&osd->lock);
                        up_read(&osdc->lock);
                        dout("%s waiting on req %p tid %llu last_tid %llu\n",
                             __func__, req, req->r_tid, last_tid);
                        wait_for_completion(&req->r_safe_completion);
                        ceph_osdc_put_request(req);
                        goto again;
                }

                mutex_unlock(&osd->lock);
        }

        up_read(&osdc->lock);
        dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
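
/*
 * Allocate a bare single-op request for a linger request (used for the
 * register and ping requests); the actual watch/notify op is filled in
 * by the caller once a linger_id has been assigned.
 */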
static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_request *req;

        req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return NULL;

        ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
        ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);

        if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
                ceph_osdc_put_request(req);
                return NULL;
        }

        return req;
}

/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
                struct ceph_object_id *oid,
                struct ceph_object_locator *oloc,
                rados_watchcb2_t wcb,
                rados_watcherrcb_t errcb,
                void *data)
{
        struct ceph_osd_linger_request *lreq;
        int ret;

        lreq = linger_alloc(osdc);
        if (!lreq)
                return ERR_PTR(-ENOMEM);

        lreq->is_watch = true;
        lreq->wcb = wcb;
        lreq->errcb = errcb;
        lreq->data = data;
        lreq->watch_valid_thru = jiffies;

        ceph_oid_copy(&lreq->t.base_oid, oid);
        ceph_oloc_copy(&lreq->t.base_oloc, oloc);
        lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        lreq->mtime = CURRENT_TIME;

        lreq->reg_req = alloc_linger_request(lreq);
        if (!lreq->reg_req) {
                ret = -ENOMEM;
                goto err_put_lreq;
        }

        lreq->ping_req = alloc_linger_request(lreq);
        if (!lreq->ping_req) {
                ret = -ENOMEM;
                goto err_put_lreq;
        }

        down_write(&osdc->lock);
        linger_register(lreq); /* before osd_req_op_* */
        osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
                              CEPH_OSD_WATCH_OP_WATCH);
        osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
                              CEPH_OSD_WATCH_OP_PING);
        linger_submit(lreq);
        up_write(&osdc->lock);

        ret = linger_reg_commit_wait(lreq);
        if (ret) {
                linger_cancel(lreq);
                goto err_put_lreq;
        }

        return lreq;

err_put_lreq:
        linger_put(lreq);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);

/*
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
                      struct ceph_osd_linger_request *lreq)
{
        struct ceph_options *opts = osdc->client->options;
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
        ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
        req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        req->r_mtime = CURRENT_TIME;
        osd_req_op_watch_init(req, 0, lreq->linger_id,
                              CEPH_OSD_WATCH_OP_UNWATCH);

        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
        if (ret)
                goto out_put_req;

        ceph_osdc_start_request(osdc, req, false);
        linger_cancel(lreq);
        linger_put(lreq);
        ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
        ceph_osdc_put_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);

static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
                                      u64 notify_id, u64 cookie, void *payload,
                                      size_t payload_len)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pl;
        int ret;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

        pl = kmalloc(sizeof(*pl), GFP_NOIO);
        if (!pl)
                return -ENOMEM;

        ceph_pagelist_init(pl);
        ret = ceph_pagelist_encode_64(pl, notify_id);
        ret |= ceph_pagelist_encode_64(pl, cookie);
        if (payload) {
                ret |= ceph_pagelist_encode_32(pl, payload_len);
                ret |= ceph_pagelist_append(pl, payload, payload_len);
        } else {
                ret |= ceph_pagelist_encode_32(pl, 0);
        }
        if (ret) {
                ceph_pagelist_release(pl);
                return -ENOMEM;
        }

        ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
        op->indata_len = pl->length;
        return 0;
}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
                         struct ceph_object_id *oid,
                         struct ceph_object_locator *oloc,
                         u64 notify_id,
                         u64 cookie,
                         void *payload,
                         size_t payload_len)
{
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        ceph_oid_copy(&req->r_base_oid, oid);
        ceph_oloc_copy(&req->r_base_oloc, oloc);
        req->r_flags = CEPH_OSD_FLAG_READ;

        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
        if (ret)
                goto out_put_req;

        ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
                                         payload_len);
        if (ret)
                goto out_put_req;

        ceph_osdc_start_request(osdc, req, false);
        ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
        ceph_osdc_put_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
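
/*
 * Set up a CEPH_OSD_OP_NOTIFY op: the notify payload (prot_ver,
 * timeout, payload) is encoded into a pagelist and attached as
 * request_data.
 */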
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u32 prot_ver, u32 timeout,
                                  void *payload, size_t payload_len)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pl;
        int ret;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
        op->notify.cookie = cookie;

        pl = kmalloc(sizeof(*pl), GFP_NOIO);
        if (!pl)
                return -ENOMEM;

        ceph_pagelist_init(pl);
        ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
        ret |= ceph_pagelist_encode_32(pl, timeout);
        ret |= ceph_pagelist_encode_32(pl, payload_len);
        ret |= ceph_pagelist_append(pl, payload, payload_len);
        if (ret) {
                ceph_pagelist_release(pl);
                return -ENOMEM;
        }

        ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
        op->indata_len = pl->length;
        return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
                     struct ceph_object_id *oid,
                     struct ceph_object_locator *oloc,
                     void *payload,
                     size_t payload_len,
                     u32 timeout,
                     struct page ***preply_pages,
                     size_t *preply_len)
{
        struct ceph_osd_linger_request *lreq;
        struct page **pages;
        int ret;

        WARN_ON(!timeout);
        if (preply_pages) {
                *preply_pages = NULL;
                *preply_len = 0;
        }

        lreq = linger_alloc(osdc);
        if (!lreq)
                return -ENOMEM;

        lreq->preply_pages = preply_pages;
        lreq->preply_len = preply_len;

        ceph_oid_copy(&lreq->t.base_oid, oid);
        ceph_oloc_copy(&lreq->t.base_oloc, oloc);
        lreq->t.flags = CEPH_OSD_FLAG_READ;

        lreq->reg_req = alloc_linger_request(lreq);
        if (!lreq->reg_req) {
                ret = -ENOMEM;
                goto out_put_lreq;
        }

        /* for notify_id */
        pages = ceph_alloc_page_vector(1, GFP_NOIO);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_put_lreq;
        }

        down_write(&osdc->lock);
        linger_register(lreq); /* before osd_req_op_* */
        ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
                                     timeout, payload, payload_len);
        if (ret) {
                linger_unregister(lreq);
                up_write(&osdc->lock);
                ceph_release_page_vector(pages, 1);
                goto out_put_lreq;
        }
        ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
                                                 response_data),
                                 pages, PAGE_SIZE, 0, false, true);
        linger_submit(lreq);
        up_write(&osdc->lock);

        ret = linger_reg_commit_wait(lreq);
        if (!ret)
                ret = linger_notify_finish_wait(lreq);
        else
                dout("lreq %p failed to initiate notify %d\n", lreq, ret);

        linger_cancel(lreq);
out_put_lreq:
        linger_put(lreq);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);

/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
                          struct ceph_osd_linger_request *lreq)
{
        unsigned long stamp, age;
        int ret;

        down_read(&osdc->lock);
        mutex_lock(&lreq->lock);
        stamp = lreq->watch_valid_thru;
        if (!list_empty(&lreq->pending_lworks)) {
                struct linger_work *lwork =
                    list_first_entry(&lreq->pending_lworks,
                                     struct linger_work,
                                     pending_item);

                if (time_before(lwork->queued_stamp, stamp))
                        stamp = lwork->queued_stamp;
        }
        age = jiffies - stamp;
        dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
             lreq, lreq->linger_id, age, lreq->last_error);
        /* we are truncating to msecs, so return a safe upper bound */
        ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

        mutex_unlock(&lreq->lock);
        up_read(&osdc->lock);
        return ret;
}

/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
        flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
        down_read(&osdc->lock);
        maybe_request_map(osdc);
        up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
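
/*
 * Client init/teardown: ceph_osdc_init() sets up the OSD session tree,
 * mempools, message pools and the notify workqueue, and arms the
 * periodic timeout work; ceph_osdc_stop() below undoes all of it.
 */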
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
        int err;

        osdc->client = client;
        init_rwsem(&osdc->lock);
        osdc->osds = RB_ROOT;
        INIT_LIST_HEAD(&osdc->osd_lru);
        spin_lock_init(&osdc->osd_lru_lock);
        osd_init(&osdc->homeless_osd);
        osdc->homeless_osd.o_osdc = osdc;
        osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
        osdc->linger_requests = RB_ROOT;
        osdc->map_checks = RB_ROOT;
        osdc->linger_map_checks = RB_ROOT;
        INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
        INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

        err = -ENOMEM;
        osdc->osdmap = ceph_osdmap_alloc();
        if (!osdc->osdmap)
                goto out;

        osdc->req_mempool = mempool_create_slab_pool(10,
                                                     ceph_osd_request_cache);
        if (!osdc->req_mempool)
                goto out_map;

        err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
                                PAGE_SIZE, 10, true, "osd_op");
        if (err < 0)
                goto out_mempool;
        err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
                                PAGE_SIZE, 10, true, "osd_op_reply");
        if (err < 0)
                goto out_msgpool;

        err = -ENOMEM;
        osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
        if (!osdc->notify_wq)
                goto out_msgpool_reply;

        schedule_delayed_work(&osdc->timeout_work,
                              osdc->client->options->osd_keepalive_timeout);
        schedule_delayed_work(&osdc->osds_timeout_work,
                   round_jiffies_relative(osdc->client->options->osd_idle_ttl));

        return 0;

out_msgpool_reply:
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
        ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
        mempool_destroy(osdc->req_mempool);
out_map:
        ceph_osdmap_destroy(osdc->osdmap);
out:
        return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
        flush_workqueue(osdc->notify_wq);
        destroy_workqueue(osdc->notify_wq);
        cancel_delayed_work_sync(&osdc->timeout_work);
        cancel_delayed_work_sync(&osdc->osds_timeout_work);

        down_write(&osdc->lock);
        while (!RB_EMPTY_ROOT(&osdc->osds)) {
                struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
                                                struct ceph_osd, o_node);
                close_osd(osd);
        }
        up_write(&osdc->lock);
        WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
        osd_cleanup(&osdc->homeless_osd);

        WARN_ON(!list_empty(&osdc->osd_lru));
        WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
        WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
        WARN_ON(atomic_read(&osdc->num_requests));
        WARN_ON(atomic_read(&osdc->num_homeless));

        ceph_osdmap_destroy(osdc->osdmap);
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        u32 truncate_seq, u64 truncate_size,
                        struct page **pages, int num_pages, int page_align)
{
        struct ceph_osd_request *req;
        int rc = 0;

        dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
             vino.snap, off, *plen);
        req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, truncate_seq, truncate_size,
                                    false);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short read due to an object boundary */
        osd_req_op_extent_osd_data_pages(req, 0,
                                pages, *plen, page_align, false, false);

        dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
             off, *plen, *plen, page_align);

        rc = ceph_osdc_start_request(osdc, req, false);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        dout("readpages result %d\n", rc);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
                         struct ceph_file_layout *layout,
                         struct ceph_snap_context *snapc,
                         u64 off, u64 len,
                         u32 truncate_seq, u64 truncate_size,
                         struct timespec *mtime,
                         struct page **pages, int num_pages)
{
        struct ceph_osd_request *req;
        int rc = 0;
        int page_align = off & ~PAGE_MASK;

        req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
                                    CEPH_OSD_OP_WRITE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
                                    snapc, truncate_seq, truncate_size,
                                    true);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short write due to an object boundary */
        osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
                                         false, false);
        dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

        req->r_mtime = *mtime;
        rc = ceph_osdc_start_request(osdc, req, true);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        if (rc == 0)
                rc = len;
        dout("writepages result %d\n", rc);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

int ceph_osdc_setup(void)
{
        size_t size = sizeof(struct ceph_osd_request) +
            CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

        BUG_ON(ceph_osd_request_cache);
        ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
                                                   0, 0, NULL);

        return ceph_osd_request_cache ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(ceph_osdc_setup);

void ceph_osdc_cleanup(void)
{
        BUG_ON(!ceph_osd_request_cache);
        kmem_cache_destroy(ceph_osd_request_cache);
        ceph_osd_request_cache = NULL;
}
EXPORT_SYMBOL(ceph_osdc_cleanup);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
        int type = le16_to_cpu(msg->hdr.type);

        switch (type) {
        case CEPH_MSG_OSD_MAP:
                ceph_osdc_handle_map(osdc, msg);
                break;
        case CEPH_MSG_OSD_OPREPLY:
                handle_reply(osd, msg);
                break;
        case CEPH_MSG_WATCH_NOTIFY:
                handle_watch_notify(osdc, msg);
                break;

        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }

        ceph_msg_put(msg);
}

/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr,
                                  int *skip)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct ceph_msg *m = NULL;
        struct ceph_osd_request *req;
        int front_len = le32_to_cpu(hdr->front_len);
        int data_len = le32_to_cpu(hdr->data_len);
        u64 tid = le64_to_cpu(hdr->tid);

        down_read(&osdc->lock);
        if (!osd_registered(osd)) {
                dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
                *skip = 1;
                goto out_unlock_osdc;
        }
        WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

        mutex_lock(&osd->lock);
        req = lookup_request(&osd->o_requests, tid);
        if (!req) {
                dout("%s osd%d tid %llu unknown, skipping\n", __func__,
                     osd->o_osd, tid);
                *skip = 1;
                goto out_unlock_session;
        }

        ceph_msg_revoke_incoming(req->r_reply);

        if (front_len > req->r_reply->front_alloc_len) {
                pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
                        __func__, osd->o_osd, req->r_tid, front_len,
                        req->r_reply->front_alloc_len);
                m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
                                 false);
                if (!m)
                        goto out_unlock_session;
                ceph_msg_put(req->r_reply);
                req->r_reply = m;
        }

        if (data_len > req->r_reply->data_length) {
                pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
                        __func__, osd->o_osd, req->r_tid, data_len,
                        req->r_reply->data_length);
                m = NULL;
                *skip = 1;
                goto out_unlock_session;
        }

        m = ceph_msg_get(req->r_reply);
        dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
        mutex_unlock(&osd->lock);
out_unlock_osdc:
        up_read(&osdc->lock);
        return m;
}

/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
        struct ceph_msg *m;
        int type = le16_to_cpu(hdr->type);
        u32 front_len = le32_to_cpu(hdr->front_len);
        u32 data_len = le32_to_cpu(hdr->data_len);

        m = ceph_msg_new(type, front_len, GFP_NOIO, false);
        if (!m)
                return NULL;

        if (data_len) {
                struct page **pages;
                struct ceph_osd_data osd_data;

                pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
                                               GFP_NOIO);
                if (IS_ERR(pages)) {
                        ceph_msg_put(m);
                        return NULL;
                }

                ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
                                         false);
                ceph_osdc_msg_data_add(m, &osd_data);
        }

        return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr,
                                  int *skip)
{
        struct ceph_osd *osd = con->private;
        int type = le16_to_cpu(hdr->type);

        *skip = 0;
        switch (type) {
        case CEPH_MSG_OSD_MAP:
        case CEPH_MSG_WATCH_NOTIFY:
                return alloc_msg_with_page_vector(hdr);
        case CEPH_MSG_OSD_OPREPLY:
                return get_reply(con, hdr, skip);
        default:
                pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
                        osd->o_osd, type);
                *skip = 1;
                return NULL;
        }
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
                return con;
        return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        put_osd(osd);
}

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
                                                  int *proto, int force_new)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
        struct ceph_auth_handshake *auth = &o->o_auth;

        if (force_new && auth->authorizer) {
                ceph_auth_destroy_authorizer(auth->authorizer);
                auth->authorizer = NULL;
        }
        if (!auth->authorizer) {
                int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        } else {
                int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        }
        *proto = ac->protocol;

        return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
        return ceph_monc_validate_auth(&osdc->client->monc);
}

static int osd_sign_message(struct ceph_msg *msg)
{
        struct ceph_osd *o = msg->con->private;
        struct ceph_auth_handshake *auth = &o->o_auth;

        return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
        struct ceph_osd *o = msg->con->private;
        struct ceph_auth_handshake *auth = &o->o_auth;

        return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
        .get = get_osd_con,
        .put = put_osd_con,
        .dispatch = dispatch,
        .get_authorizer = get_authorizer,
        .verify_authorizer_reply = verify_authorizer_reply,
        .invalidate_authorizer = invalidate_authorizer,
        .alloc_msg = alloc_msg,
        .sign_message = osd_sign_message,
        .check_message_signature = osd_check_message_signature,
        .fault = osd_fault,
};