// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512
static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices".  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */
#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
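/*
 * Usage sketch (illustrative, not from the original source): the macro
 * above is a bounds-checked field accessor.  A call such as
 *
 *	osd_req_op_data(req, 0, extent, osd_data)
 *
 * BUG()s if op 0 doesn't exist and otherwise evaluates to
 * &req->r_ops[0].extent.osd_data.
 */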
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(osd_req->r_ops[which].op != CEPH_OSD_OP_STAT);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
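/*
 * Minimal usage sketch (illustrative, not part of the original file;
 * the helper name is hypothetical): attach a freshly allocated page
 * vector to the extent op at index 0 and let the request own it, so
 * the pages are released together with the request in
 * osd_req_op_data_release().
 */
static inline void __maybe_unused example_attach_read_pages(
		struct ceph_osd_request *req, u64 len)
{
	struct page **pages;

	pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
	if (!IS_ERR(pages))
		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);
}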
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	default:
		break;
	}
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}
static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}
static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}
/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
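/*
 * Allocation sketch (illustrative, not part of the original file): a
 * two-op, non-mempool request comes out of ceph_osd_request_cache
 * because num_ops <= CEPH_OSD_SLAB_OPS; anything larger falls back to
 * kmalloc().  r_base_oid and r_base_oloc must still be filled in and
 * ceph_osdc_alloc_messages() called before the request can be sent:
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 2, false, GFP_NOFS);
 */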
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}
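/*
 * Expansion sketch (for reference): __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
 * stamps out one "case CEPH_OSD_OP_<op>: return true;" per opcode known
 * to this client, e.g.
 *
 *	case CEPH_OSD_OP_READ: return true;
 *	case CEPH_OSD_OP_STAT: return true;
 *	...
 *
 * so opcode validity automatically tracks the opcode table in rados.h.
 */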
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
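/*
 * Usage sketch (illustrative, not part of the original file): a 4 KiB
 * write at object offset 0 with truncation disabled.  For writes,
 * indata_len is primed with the extent length and must match the data
 * attached afterwards:
 *
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_WRITE, 0, 4096, 0, 0);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, 4096, 0,
 *					 false, false);
 */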
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			 u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
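/*
 * Usage sketch (illustrative; "lock" and "assert_locked" are shown only
 * as example class/method names): a CALL op carries the class and
 * method names in request_info, optional input in request_data and
 * output in response_data:
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock",
 *			    "assert_locked");
 *	osd_req_op_cls_request_data_pagelist(req, 0, in_pagelist);
 *	osd_req_op_cls_response_data_pages(req, 0, out_pages, out_len, 0,
 *					   false, false);
 */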
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
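/*
 * Usage sketch (illustrative, not part of the original file): hint that
 * objects will be written as whole, fixed-size units, as a striped
 * block-device client might do:
 *
 *	osd_req_op_alloc_hint_init(req, 0, object_size, object_size);
 *
 * Because of CEPH_OSD_OP_FLAG_FAILOK above, OSDs that don't understand
 * SETALLOCHINT simply ignore it instead of failing the request.
 */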
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_abort_on_full = true;
	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
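/*
 * End-to-end usage sketch (illustrative, not part of the original file;
 * the helper name is hypothetical): synchronously read up to @len bytes
 * of a file extent into @pages.  Error handling is pared down to the
 * essentials.
 */
static inline int __maybe_unused example_sync_read(
		struct ceph_osd_client *osdc, struct ceph_file_layout *layout,
		struct ceph_vino vino, u64 off, u64 len, struct page **pages)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, 0, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* len may have been shortened to the object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);

	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	return ret;
}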
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
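/*
 * Expansion sketch (for reference): DEFINE_RB_FUNCS(request, ...) above
 * generates the static helpers insert_request(), erase_request() and
 * lookup_request() keyed by r_tid; the _mc variant generates
 * insert_request_mc()/erase_request_mc()/lookup_request_mc() for the
 * map-check tree.  Both sets are used throughout this file.
 */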
static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}
enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};
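/*
 * CALC_TARGET_NO_ACTION: the mapping didn't change, nothing to do.
 * CALC_TARGET_NEED_RESEND: the target moved, was unpaused or split --
 * the request must be (re)sent.
 * CALC_TARGET_POOL_DNE: the pool doesn't exist in this osdmap epoch.
 */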
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   struct ceph_connection *con,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;
	int ret;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	ret = __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
					  &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting, any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;

		t->osd = acting.primary;
	}

	if (unpaused || legacy_change || force_resend ||
	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
					       RESEND_ON_SPLIT)))
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}
static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)
static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}
static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}
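/*
 * Worked example (illustrative): with is_max, pool and namespace equal,
 * the object whose reversed-bit hash key is smaller sorts first
 * regardless of name; names only break ties once the bitwise keys
 * match.  This mirrors hobject_t's bitwise comparator on the OSD side,
 * which is what makes backoff [begin, end) ranges meaningful here.
 */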
/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}
*hoid
)
1610 return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1611 4 + hoid
->key_len
+ 4 + hoid
->oid_len
+ 4 + hoid
->nspace_len
;
1614 static void encode_hoid(void **p
, void *end
, const struct ceph_hobject_id
*hoid
)
1616 ceph_start_encoding(p
, 4, 3, hoid_encoding_size(hoid
));
1617 ceph_encode_string(p
, end
, hoid
->key
, hoid
->key_len
);
1618 ceph_encode_string(p
, end
, hoid
->oid
, hoid
->oid_len
);
1619 ceph_encode_64(p
, hoid
->snapid
);
1620 ceph_encode_32(p
, hoid
->hash
);
1621 ceph_encode_8(p
, hoid
->is_max
);
1622 ceph_encode_string(p
, end
, hoid
->nspace
, hoid
->nspace_len
);
1623 ceph_encode_64(p
, hoid
->pool
);
1626 static void free_hoid(struct ceph_hobject_id
*hoid
)
1631 kfree(hoid
->nspace
);
1636 static struct ceph_osd_backoff
*alloc_backoff(void)
1638 struct ceph_osd_backoff
*backoff
;
1640 backoff
= kzalloc(sizeof(*backoff
), GFP_NOIO
);
1644 RB_CLEAR_NODE(&backoff
->spg_node
);
1645 RB_CLEAR_NODE(&backoff
->id_node
);
1649 static void free_backoff(struct ceph_osd_backoff
*backoff
)
1651 WARN_ON(!RB_EMPTY_NODE(&backoff
->spg_node
));
1652 WARN_ON(!RB_EMPTY_NODE(&backoff
->id_node
));
1654 free_hoid(backoff
->begin
);
1655 free_hoid(backoff
->end
);
1660 * Within a specific spgid, backoffs are managed by ->begin hoid.
1662 DEFINE_RB_INSDEL_FUNCS2(backoff
, struct ceph_osd_backoff
, begin
, hoid_compare
,
1663 RB_BYVAL
, spg_node
);
static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					    const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}
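/*
 * Containment sketch (illustrative): backoffs are keyed by ->begin and
 * don't overlap, so a plain rbtree walk suffices -- descend left when
 * hoid < cur->begin, return cur when cur->begin <= hoid < cur->end,
 * otherwise descend right.
 */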
/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)

static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}
/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}

static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}
static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}
static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
	ceph_encode_8(p, 1);
	ceph_encode_64(p, pgid->pool);
	ceph_encode_32(p, pgid->seed);
	ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
	encode_pgid(p, &spgid->pgid);
	ceph_encode_8(p, spgid->shard);
}

static void encode_oloc(void **p, void *end,
			const struct ceph_object_locator *oloc)
{
	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
	ceph_encode_64(p, oloc->pool);
	ceph_encode_32(p, -1); /* preferred */
	ceph_encode_32(p, 0);  /* key len */
	if (oloc->pool_ns)
		ceph_encode_string(p, end, oloc->pool_ns->str,
				   oloc->pool_ns->len);
	else
		ceph_encode_32(p, 0);
}
static void encode_request_partial(struct ceph_osd_request *req,
				   struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);

	/* reqid */
	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
	memset(p, 0, sizeof(struct ceph_osd_reqid));
	p += sizeof(struct ceph_osd_reqid);

	/* trace */
	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
	p += sizeof(struct ceph_blkin_trace_info);

	ceph_encode_32(&p, 0); /* client_inc, always 0 */
	ceph_encode_timespec(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);

	encode_oloc(&p, end, &req->r_t.target_oloc);
	ceph_encode_string(&p, end, req->r_t.target_oid.name,
			   req->r_t.target_oid.name_len);

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
	BUG_ON(p > end - 8); /* space for features */

	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
	/* front_len is finalized in encode_request_finish() */
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
static void encode_request_finish(struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const partial_end = p + msg->front.iov_len;
	void *const end = p + msg->front_alloc_len;

	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
		/* luminous OSD -- encode features and be done */
		p = partial_end;
		ceph_encode_64(&p, msg->con->peer_features);
	} else {
		struct {
			char spgid[CEPH_ENCODING_START_BLK_LEN +
				   CEPH_PGID_ENCODING_LEN + 1];
			__le32 hash;
			__le32 epoch;
			__le32 flags;
			char reqid[CEPH_ENCODING_START_BLK_LEN +
				   sizeof(struct ceph_osd_reqid)];
			char trace[sizeof(struct ceph_blkin_trace_info)];
			__le32 client_inc;
			struct ceph_timespec mtime;
		} __packed head;
		struct ceph_pg pgid;
		void *oloc, *oid, *tail;
		int oloc_len, oid_len, tail_len;
		int len;

		/*
		 * Pre-luminous OSD -- reencode v8 into v4 using @head
		 * as a temporary buffer.  Encode the raw PG; the rest
		 * is just a matter of moving oloc, oid and tail blobs
		 * around.
		 */
		memcpy(&head, p, sizeof(head));
		p += sizeof(head);

		oloc = p;
		p += CEPH_ENCODING_START_BLK_LEN;
		pgid.pool = ceph_decode_64(&p);
		p += 4 + 4; /* preferred, key len */
		len = ceph_decode_32(&p);
		p += len;   /* nspace */
		oloc_len = p - oloc;

		oid = p;
		len = ceph_decode_32(&p);
		p += len;
		oid_len = p - oid;

		tail = p;
		tail_len = partial_end - p;

		p = msg->front.iov_base;
		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));

		/* reassert_version */
		memset(p, 0, sizeof(struct ceph_eversion));
		p += sizeof(struct ceph_eversion);

		BUG_ON(p >= oloc);
		memmove(p, oloc, oloc_len);
		p += oloc_len;

		pgid.seed = le32_to_cpu(head.hash);
		encode_pgid(&p, &pgid); /* raw pg */

		BUG_ON(p >= oid);
		memmove(p, oid, oid_len);
		p += oid_len;

		/* tail -- ops, snapid, snapc, retry_attempt */
		BUG_ON(p >= tail);
		memmove(p, tail, tail_len);
		p += tail_len;

		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
	     le16_to_cpu(msg->hdr.version));
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/* backoff? */
	if (should_plug_request(req))
		return;

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request_partial(req, req->r_request);

	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
	     req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}
*osdc
)
2078 bool continuous
= false;
2080 verify_osdc_locked(osdc
);
2081 WARN_ON(!osdc
->osdmap
->epoch
);
2083 if (ceph_osdmap_flag(osdc
, CEPH_OSDMAP_FULL
) ||
2084 ceph_osdmap_flag(osdc
, CEPH_OSDMAP_PAUSERD
) ||
2085 ceph_osdmap_flag(osdc
, CEPH_OSDMAP_PAUSEWR
)) {
2086 dout("%s osdc %p continuous\n", __func__
, osdc
);
2089 dout("%s osdc %p onetime\n", __func__
, osdc
);
2092 if (ceph_monc_want_map(&osdc
->client
->monc
, CEPH_SUB_OSDMAP
,
2093 osdc
->osdmap
->epoch
+ 1, continuous
))
2094 ceph_monc_renew_subs(&osdc
->client
->monc
);
static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);

static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	enum calc_target_result ct_res;
	bool need_send = false;
	bool promoted = false;
	bool need_abort = false;

	WARN_ON(req->r_tid);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	ct_res = calc_target(osdc, &req->r_t, NULL, false);
	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
		goto promote;

	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if (osdc->osdmap->epoch < osdc->epoch_barrier) {
		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
		     osdc->epoch_barrier);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		pr_warn_ratelimited("FULL or reached pool quota\n");
		req->r_t.paused = true;
		maybe_request_map(osdc);
		if (req->r_abort_on_full)
			need_abort = true;
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(osd, req);
	if (need_send)
		send_request(req);
	else if (need_abort)
		complete_request(req, -ENOSPC);
	mutex_unlock(&osd->lock);

	if (ct_res == CALC_TARGET_POOL_DNE)
		send_map_check(req);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}
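/*
 * Illustrative caller flow (hedged sketch, exposition only): because
 * the tid comes from osdc->last_tid and the request is queued while
 * osd->lock is still held, two writes to the same object submitted
 * back to back get strictly increasing tids and cannot be reordered
 * on the wire:
 */
#if 0
	down_read(&osdc->lock);
	submit_request(req1, false);	/* gets tid N */
	submit_request(req2, false);	/* gets tid N + 1, queued after req1 */
	up_read(&osdc->lock);
#endif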
static void account_request(struct ceph_osd_request *req)
{
	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));

	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
	atomic_inc(&req->r_osdc->num_requests);

	req->r_start_stamp = jiffies;
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}
static void finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	if (req->r_osd)
		unlink_request(req->r_osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}
static void __complete_request(struct ceph_osd_request *req)
{
	if (req->r_callback) {
		dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
		     req->r_tid, req->r_callback, req->r_result);
		req->r_callback(req);
	}
}

/*
 * This is open-coded in handle_reply().
 */
static void complete_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	req->r_result = err;
	finish_request(req);
	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}
static void cancel_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
	erase_request_mc(&osdc->map_checks, req);
	ceph_osdc_put_request(req);
}

static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	cancel_map_check(req);
	finish_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}
static void abort_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	cancel_map_check(req);
	complete_request(req, err);
}
static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	if (likely(eb > osdc->epoch_barrier)) {
		dout("updating epoch_barrier from %u to %u\n",
		     osdc->epoch_barrier, eb);
		osdc->epoch_barrier = eb;
		/* Request map if we're not to the barrier yet */
		if (eb > osdc->osdmap->epoch)
			maybe_request_map(osdc);
	}
}

void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	down_read(&osdc->lock);
	if (unlikely(eb > osdc->epoch_barrier)) {
		up_read(&osdc->lock);
		down_write(&osdc->lock);
		update_epoch_barrier(osdc, eb);
		up_write(&osdc->lock);
	} else {
		up_read(&osdc->lock);
	}
}
EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
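/*
 * Illustrative only (hedged sketch): a caller that learns out of band,
 * e.g. from an MDS message, that writes aborted with -ENOSPC must not
 * be replayed until the cluster has seen map epoch @eb would call:
 */
#if 0
	ceph_osdc_update_epoch_barrier(osdc, eb);
#endif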
/*
 * Drop all pending requests that are stalled waiting on a full condition to
 * clear, and complete them with ENOSPC as the return code.  Set the
 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
 * cancelled.
 */
static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *m;
	bool victims = false;

	dout("enter abort_on_full\n");

	if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
		goto out;

	/* Scan list and see if there is anything to abort */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);
			m = rb_next(m);

			if (req->r_abort_on_full) {
				victims = true;
				break;
			}
		}
		if (victims)
			break;
	}

	if (!victims)
		goto out;

	/*
	 * Update the barrier to current epoch if it's behind that point,
	 * since we know we have some calls to be aborted in the tree.
	 */
	update_epoch_barrier(osdc, osdc->osdmap->epoch);

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);
			m = rb_next(m);

			if (req->r_abort_on_full &&
			    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
			     pool_full(osdc, req->r_t.target_oloc.pool)))
				abort_request(req, -ENOSPC);
		}
	}
out:
	dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
}
static void check_pool_dne(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (req->r_attempts) {
		/*
		 * We sent a request earlier, which means that
		 * previously the pool existed, and now it does not
		 * (i.e., it was deleted).
		 */
		req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
	}

	if (req->r_map_dne_bound) {
		if (map->epoch >= req->r_map_dne_bound) {
			/* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}
static void map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_request *req;
	u64 tid = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}

	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
	if (!req->r_map_dne_bound)
		req->r_map_dne_bound = greq->u.newest;
	erase_request_mc(&osdc->map_checks, req);
	check_pool_dne(req);

	ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}
static void send_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
	WARN_ON(ret);
}
/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}

static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	RB_CLEAR_NODE(&lreq->mc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	INIT_LIST_HEAD(&lreq->pending_lworks);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}
DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
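/*
 * Illustrative sketch (not from this file): DEFINE_RB_FUNCS(name, type,
 * keyfld, nodefld) in <linux/ceph/libceph.h> generates insert_<name>(),
 * erase_<name>() and lookup_<name>() helpers over an rb-tree keyed by
 * @keyfld.  Roughly, modulo the exact macro plumbing:
 */
#if 0
static struct ceph_osd_linger_request *
lookup_linger_osdc(struct rb_root *root, u64 linger_id)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_linger_request *cur =
		    rb_entry(n, struct ceph_osd_linger_request, osdc_node);

		if (linger_id < cur->linger_id)
			n = n->rb_left;
		else if (linger_id > cur->linger_id)
			n = n->rb_right;
		else
			return cur;
	}
	return NULL;
}
#endif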
/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}

static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);

	erase_linger_osdc(&osdc->linger_requests, lreq);
	linger_put(lreq);
}

static void cancel_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}
struct linger_work {
	struct work_struct work;
	struct ceph_osd_linger_request *lreq;
	struct list_head pending_item;
	unsigned long queued_stamp;

	union {
		struct {
			u64 notify_id;
			u64 notifier_id;
			void *payload; /* points into @msg front */
			size_t payload_len;

			struct ceph_msg *msg; /* for ceph_msg_put() */
		} notify;
		struct {
			int err;
		} error;
	};
};

static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
				       work_func_t workfn)
{
	struct linger_work *lwork;

	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
	if (!lwork)
		return NULL;

	INIT_WORK(&lwork->work, workfn);
	INIT_LIST_HEAD(&lwork->pending_item);
	lwork->lreq = linger_get(lreq);

	return lwork;
}
static void lwork_free(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	mutex_lock(&lreq->lock);
	list_del(&lwork->pending_item);
	mutex_unlock(&lreq->lock);

	linger_put(lreq);
	kfree(lwork);
}

static void lwork_queue(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_lreq_locked(lreq);
	WARN_ON(!list_empty(&lwork->pending_item));

	lwork->queued_stamp = jiffies;
	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
	queue_work(osdc->notify_wq, &lwork->work);
}
static void do_watch_notify(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	WARN_ON(!lreq->is_watch);
	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
	     lwork->notify.payload_len);
	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
		  lwork->notify.notifier_id, lwork->notify.payload,
		  lwork->notify.payload_len);

out:
	ceph_msg_put(lwork->notify.msg);
	lwork_free(lwork);
}

static void do_watch_error(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);

out:
	lwork_free(lwork);
}

static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{
	struct linger_work *lwork;

	lwork = lwork_alloc(lreq, do_watch_error);
	if (!lwork) {
		pr_err("failed to allocate error-lwork\n");
		return;
	}

	lwork->error.err = lreq->last_error;
	lwork_queue(lwork);
}
static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
				       int result)
{
	if (!completion_done(&lreq->reg_commit_wait)) {
		lreq->reg_commit_error = (result <= 0 ? result : 0);
		complete_all(&lreq->reg_commit_wait);
	}
}

static void linger_commit_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
	     lreq->linger_id, req->r_result);
	linger_reg_commit_complete(lreq, req->r_result);
	lreq->committed = true;

	if (!lreq->is_watch) {
		struct ceph_osd_data *osd_data =
		    osd_req_op_data(req, 0, notify, response_data);
		void *p = page_address(osd_data->pages[0]);

		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);

		/* make note of the notify_id */
		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
			lreq->notify_id = ceph_decode_64(&p);
			dout("lreq %p notify_id %llu\n", lreq,
			     lreq->notify_id);
		} else {
			dout("lreq %p no notify_id\n", lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}

static void linger_reconnect_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
	if (req->r_result < 0) {
		if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
static void send_linger(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req = lreq->reg_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	verify_osdc_wrlocked(req->r_osdc);
	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = lreq->t.flags;
	req->r_mtime = lreq->mtime;

	mutex_lock(&lreq->lock);
	if (lreq->is_watch && lreq->committed) {
		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
			op->watch.cookie != lreq->linger_id);
		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
		op->watch.gen = ++lreq->register_gen;
		dout("lreq %p reconnect register_gen %u\n", lreq,
		     op->watch.gen);
		req->r_callback = linger_reconnect_cb;
	} else {
		if (!lreq->is_watch)
			lreq->notify_id = 0;
		else
			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
		dout("lreq %p register\n", lreq);
		req->r_callback = linger_commit_cb;
	}
	mutex_unlock(&lreq->lock);

	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	submit_request(req, true);
}
static void linger_ping_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
	     lreq->last_error);
	if (lreq->register_gen == req->r_ops[0].watch.gen) {
		if (!req->r_result) {
			lreq->watch_valid_thru = lreq->ping_sent;
		} else if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	} else {
		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
		     lreq->register_gen, req->r_ops[0].watch.gen);
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_request *req = lreq->ping_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("%s PAUSERD\n", __func__);
		return;
	}

	lreq->ping_sent = jiffies;
	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
	     lreq->register_gen);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	target_copy(&req->r_t, &lreq->t);

	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
		op->watch.cookie != lreq->linger_id ||
		op->watch.op != CEPH_OSD_WATCH_OP_PING);
	op->watch.gen = lreq->register_gen;
	req->r_callback = linger_ping_cb;
	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	ceph_osdc_get_request(req);
	account_request(req);
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(lreq->osd, req);
	send_request(req);
}
static void linger_submit(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd *osd;

	calc_target(osdc, &lreq->t, NULL, false);
	osd = lookup_create_osd(osdc, lreq->t.osd, true);
	link_linger(osd, lreq);

	send_linger(lreq);
}
static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (!lookup_lreq)
		return;

	WARN_ON(lookup_lreq != lreq);
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	linger_put(lreq);
}
/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
	if (lreq->is_watch && lreq->ping_req->r_osd)
		cancel_linger_request(lreq->ping_req);
	if (lreq->reg_req->r_osd)
		cancel_linger_request(lreq->reg_req);
	cancel_linger_map_check(lreq);
	unlink_linger(lreq->osd, lreq);
	linger_unregister(lreq);
}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	down_write(&osdc->lock);
	if (__linger_registered(lreq))
		__linger_cancel(lreq);
	up_write(&osdc->lock);
}
static void send_linger_map_check(struct ceph_osd_linger_request *lreq);

static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (lreq->register_gen) {
		lreq->map_dne_bound = map->epoch;
		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
		     lreq, lreq->linger_id);
	} else {
		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
		     map->epoch);
	}

	if (lreq->map_dne_bound) {
		if (map->epoch >= lreq->map_dne_bound) {
			/* we had a new enough map */
			pr_info("linger_id %llu pool does not exist\n",
				lreq->linger_id);
			linger_reg_commit_complete(lreq, -ENOENT);
			__linger_cancel(lreq);
		}
	} else {
		send_linger_map_check(lreq);
	}
}
static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_linger_request *lreq;
	u64 linger_id = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
	if (!lreq) {
		dout("%s linger_id %llu dne\n", __func__, linger_id);
		goto out_unlock;
	}

	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
	     greq->u.newest);
	if (!lreq->map_dne_bound)
		lreq->map_dne_bound = greq->u.newest;
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	check_linger_pool_dne(lreq);

	linger_put(lreq);
out_unlock:
	up_write(&osdc->lock);
}
static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (lookup_lreq) {
		WARN_ON(lookup_lreq != lreq);
		return;
	}

	linger_get(lreq);
	insert_linger_mc(&osdc->linger_map_checks, lreq);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  linger_map_check_cb, lreq->linger_id);
	WARN_ON(ret);
}
static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
	return ret ?: lreq->reg_commit_error;
}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
	return ret ?: lreq->notify_finish_error;
}
/*
 * Timeout callback, called every N seconds.  When 1 or more OSD
 * requests has been active for more than N seconds, we send a keepalive
 * (tag + timestamp) to its OSD to ensure any communications channel
 * reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
	    container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_options *opts = osdc->client->options;
	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
	LIST_HEAD(slow_osds);
	struct rb_node *n, *p;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		bool found = false;

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_stamp, cutoff)) {
				dout(" req %p tid %llu on osd%d is laggy\n",
				     req, req->r_tid, osd->o_osd);
				found = true;
			}
			if (opts->osd_request_timeout &&
			    time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
						   req->r_tid, osd->o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
			struct ceph_osd_linger_request *lreq =
			    rb_entry(p, struct ceph_osd_linger_request, node);

			dout(" lreq %p linger_id %llu is served by osd%d\n",
			     lreq, lreq->linger_id, osd->o_osd);
			found = true;

			mutex_lock(&lreq->lock);
			if (lreq->is_watch && lreq->committed && !lreq->last_error)
				send_linger_ping(lreq);
			mutex_unlock(&lreq->lock);
		}

		if (found)
			list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}

	if (opts->osd_request_timeout) {
		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
						   req->r_tid,
						   osdc->homeless_osd.o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
	}

	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
		maybe_request_map(osdc);

	while (!list_empty(&slow_osds)) {
		struct ceph_osd *osd = list_first_entry(&slow_osds,
							struct ceph_osd,
							o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
}
static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
	    container_of(work, struct ceph_osd_client,
			 osds_timeout_work.work);
	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
	struct ceph_osd *osd, *nosd;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;

		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
		close_osd(osd);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}
static int ceph_oloc_decode(void **p, void *end,
			    struct ceph_object_locator *oloc)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret = 0;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_v < 3) {
		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	if (struct_cv > 6) {
		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	oloc->pool = ceph_decode_64(p);
	*p += 4; /* skip preferred */

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_object_locator::key is set\n");
		goto e_inval;
	}

	if (struct_v >= 5) {
		bool changed = false;

		len = ceph_decode_32(p);
		if (len > 0) {
			ceph_decode_need(p, end, len, e_inval);
			if (!oloc->pool_ns ||
			    ceph_compare_string(oloc->pool_ns, *p, len))
				changed = true;
			*p += len;
		} else {
			if (oloc->pool_ns)
				changed = true;
		}
		if (changed) {
			/* redirect changes namespace */
			pr_warn("ceph_object_locator::nspace is changed\n");
			goto e_inval;
		}
	}

	if (struct_v >= 6) {
		s64 hash = ceph_decode_64(p);

		if (hash != -1) {
			pr_warn("ceph_object_locator::hash is set\n");
			goto e_inval;
		}
	}

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
static int ceph_redirect_decode(void **p, void *end,
				struct ceph_request_redirect *redir)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_cv > 1) {
		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	ret = ceph_oloc_decode(p, end, &redir->oloc);
	if (ret)
		goto out;

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_request_redirect::object_name is set\n");
		goto e_inval;
	}

	len = ceph_decode_32(p);
	*p += len; /* skip osd_instructions */

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
struct MOSDOpReply {
	struct ceph_pg pgid;
	u64 flags;
	int result;
	u32 epoch;
	int num_ops;
	u32 outdata_len[CEPH_OSD_MAX_OPS];
	s32 rval[CEPH_OSD_MAX_OPS];
	int retry_attempt;
	struct ceph_eversion replay_version;
	u64 user_version;
	struct ceph_request_redirect redirect;
};
static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u16 version = le16_to_cpu(msg->hdr.version);
	struct ceph_eversion bad_replay_version;
	u8 decode_redir;
	u32 len;
	int ret;
	int i;

	ceph_decode_32_safe(&p, end, len, e_inval);
	ceph_decode_need(&p, end, len, e_inval);
	p += len; /* skip oid */

	ret = ceph_decode_pgid(&p, end, &m->pgid);
	if (ret)
		return ret;

	ceph_decode_64_safe(&p, end, m->flags, e_inval);
	ceph_decode_32_safe(&p, end, m->result, e_inval);
	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
	p += sizeof(bad_replay_version);
	ceph_decode_32_safe(&p, end, m->epoch, e_inval);

	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
		goto e_inval;

	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
			 e_inval);
	for (i = 0; i < m->num_ops; i++) {
		struct ceph_osd_op *op = p;

		m->outdata_len[i] = le32_to_cpu(op->payload_len);
		p += sizeof(*op);
	}

	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
	for (i = 0; i < m->num_ops; i++)
		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);

	if (version >= 5) {
		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
		memcpy(&m->replay_version, p, sizeof(m->replay_version));
		p += sizeof(m->replay_version);
		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
	} else {
		m->replay_version = bad_replay_version; /* struct */
		m->user_version = le64_to_cpu(m->replay_version.version);
	}

	if (version >= 6) {
		if (version >= 7)
			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
		else
			decode_redir = 1;
	} else {
		decode_redir = 0;
	}

	if (decode_redir) {
		ret = ceph_redirect_decode(&p, end, &m->redirect);
		if (ret)
			return ret;
	} else {
		ceph_oloc_init(&m->redirect.oloc);
	}

	return 0;

e_inval:
	return -EINVAL;
}
/*
 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
 * specified.
 */
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_osd_request *req;
	struct MOSDOpReply m;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 data_len = 0;
	int ret;
	int i;

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
		goto out_unlock_session;
	}

	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
	ret = decode_MOSDOpReply(msg, &m);
	m.redirect.oloc.pool_ns = NULL;
	if (ret) {
		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
		       req->r_tid, ret);
		ceph_msg_dump(msg);
		goto fail_request;
	}
	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
	     le64_to_cpu(m.replay_version.version), m.user_version);

	if (m.retry_attempt >= 0) {
		if (m.retry_attempt != req->r_attempts - 1) {
			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
			     req, req->r_tid, m.retry_attempt,
			     req->r_attempts - 1);
			goto out_unlock_session;
		}
	} else {
		WARN_ON(1); /* MOSDOpReply v4 is assumed */
	}

	if (!ceph_oloc_empty(&m.redirect.oloc)) {
		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
		     m.redirect.oloc.pool);
		unlink_request(osd, req);
		mutex_unlock(&osd->lock);

		/*
		 * Not ceph_oloc_copy() - changing pool_ns is not
		 * supported.
		 */
		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
		req->r_tid = 0;
		__submit_request(req, false);
		goto out_unlock_osdc;
	}

	if (m.num_ops != req->r_num_ops) {
		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
		       req->r_num_ops, req->r_tid);
		goto fail_request;
	}
	for (i = 0; i < req->r_num_ops; i++) {
		dout(" req %p tid %llu op %d rval %d len %u\n", req,
		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
		req->r_ops[i].rval = m.rval[i];
		req->r_ops[i].outdata_len = m.outdata_len[i];
		data_len += m.outdata_len[i];
	}
	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
		goto fail_request;
	}
	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
	     req, req->r_tid, m.result, data_len);

	/*
	 * Since we only ever request ONDISK, we should only ever get
	 * one (type of) reply back.
	 */
	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
	req->r_result = m.result ?: data_len;
	finish_request(req);
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);

	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
	return;

fail_request:
	complete_request(req, -EIO);
out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
}
static void set_pool_was_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		pi->was_full = __pool_full(pi);
	}
}

static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return pi->was_full && !__pool_full(pi);
}
static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	enum calc_target_result ct_res;

	ct_res = calc_target(osdc, &lreq->t, NULL, true);
	if (ct_res == CALC_TARGET_NEED_RESEND) {
		struct ceph_osd *osd;

		osd = lookup_create_osd(osdc, lreq->t.osd, true);
		if (osd != lreq->osd) {
			unlink_linger(lreq->osd, lreq);
			link_linger(osd, lreq);
		}
	}

	return ct_res;
}
/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
			  bool force_resend,
			  bool cleared_full,
			  bool check_pool_cleared_full,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;
	bool force_resend_writes;

	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* recalc_linger_target() */

		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
		     lreq->linger_id);
		ct_res = recalc_linger_target(lreq);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
			if (!force_resend && !force_resend_writes)
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_linger_map_check(lreq);
			/*
			 * scan_requests() for the previous epoch(s)
			 * may have already added it to the list, since
			 * it's not unlinked here.
			 */
			if (list_empty(&lreq->scan_item))
				list_add_tail(&lreq->scan_item, need_resend_linger);
			break;
		case CALC_TARGET_POOL_DNE:
			list_del_init(&lreq->scan_item);
			check_linger_pool_dne(lreq);
			break;
		}
	}

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* unlink_request(), check_pool_dne() */

		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
		ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
				     false);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
			if (!force_resend &&
			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
			     !force_resend_writes))
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_map_check(req);
			unlink_request(osd, req);
			insert_request(need_resend, req);
			break;
		case CALC_TARGET_POOL_DNE:
			check_pool_dne(req);
			break;
		}
	}
}
static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}
static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	enum calc_target_result ct_res;
	struct rb_node *n;

	/* make sure need_resend targets reflect latest map */
	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);

		if (req->r_t.epoch < osdc->osdmap->epoch) {
			ct_res = calc_target(osdc, &req->r_t, NULL, false);
			if (ct_res == CALC_TARGET_POOL_DNE) {
				erase_request(need_resend, req);
				check_pool_dne(req);
			}
		}
	}

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */

		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr ||
	    osdc->osdmap->epoch < osdc->epoch_barrier)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_osdc_abort_on_full(osdc);
	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}
/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	clear_backoffs(osd);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}
/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}
{
3914 struct ceph_spg spgid
;
3918 struct ceph_hobject_id
*begin
;
3919 struct ceph_hobject_id
*end
;
3922 static int decode_MOSDBackoff(const struct ceph_msg
*msg
, struct MOSDBackoff
*m
)
3924 void *p
= msg
->front
.iov_base
;
3925 void *const end
= p
+ msg
->front
.iov_len
;
3930 ret
= ceph_start_decoding(&p
, end
, 1, "spg_t", &struct_v
, &struct_len
);
3934 ret
= ceph_decode_pgid(&p
, end
, &m
->spgid
.pgid
);
3938 ceph_decode_8_safe(&p
, end
, m
->spgid
.shard
, e_inval
);
3939 ceph_decode_32_safe(&p
, end
, m
->map_epoch
, e_inval
);
3940 ceph_decode_8_safe(&p
, end
, m
->op
, e_inval
);
3941 ceph_decode_64_safe(&p
, end
, m
->id
, e_inval
);
3943 m
->begin
= kzalloc(sizeof(*m
->begin
), GFP_NOIO
);
3947 ret
= decode_hoid(&p
, end
, m
->begin
);
3949 free_hoid(m
->begin
);
3953 m
->end
= kzalloc(sizeof(*m
->end
), GFP_NOIO
);
3955 free_hoid(m
->begin
);
3959 ret
= decode_hoid(&p
, end
, m
->end
);
3961 free_hoid(m
->begin
);
3972 static struct ceph_msg
*create_backoff_message(
3973 const struct ceph_osd_backoff
*backoff
,
3976 struct ceph_msg
*msg
;
3980 msg_size
= CEPH_ENCODING_START_BLK_LEN
+
3981 CEPH_PGID_ENCODING_LEN
+ 1; /* spgid */
3982 msg_size
+= 4 + 1 + 8; /* map_epoch, op, id */
3983 msg_size
+= CEPH_ENCODING_START_BLK_LEN
+
3984 hoid_encoding_size(backoff
->begin
);
3985 msg_size
+= CEPH_ENCODING_START_BLK_LEN
+
3986 hoid_encoding_size(backoff
->end
);
3988 msg
= ceph_msg_new(CEPH_MSG_OSD_BACKOFF
, msg_size
, GFP_NOIO
, true);
3992 p
= msg
->front
.iov_base
;
3993 end
= p
+ msg
->front_alloc_len
;
3995 encode_spgid(&p
, &backoff
->spgid
);
3996 ceph_encode_32(&p
, map_epoch
);
3997 ceph_encode_8(&p
, CEPH_OSD_BACKOFF_OP_ACK_BLOCK
);
3998 ceph_encode_64(&p
, backoff
->id
);
3999 encode_hoid(&p
, end
, backoff
->begin
);
4000 encode_hoid(&p
, end
, backoff
->end
);
4003 msg
->front
.iov_len
= p
- msg
->front
.iov_base
;
4004 msg
->hdr
.version
= cpu_to_le16(1); /* MOSDBackoff v1 */
4005 msg
->hdr
.front_len
= cpu_to_le32(msg
->front
.iov_len
);
4010 static void handle_backoff_block(struct ceph_osd
*osd
, struct MOSDBackoff
*m
)
4012 struct ceph_spg_mapping
*spg
;
4013 struct ceph_osd_backoff
*backoff
;
4014 struct ceph_msg
*msg
;
4016 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__
, osd
->o_osd
,
4017 m
->spgid
.pgid
.pool
, m
->spgid
.pgid
.seed
, m
->spgid
.shard
, m
->id
);
4019 spg
= lookup_spg_mapping(&osd
->o_backoff_mappings
, &m
->spgid
);
4021 spg
= alloc_spg_mapping();
4023 pr_err("%s failed to allocate spg\n", __func__
);
4026 spg
->spgid
= m
->spgid
; /* struct */
4027 insert_spg_mapping(&osd
->o_backoff_mappings
, spg
);
4030 backoff
= alloc_backoff();
4032 pr_err("%s failed to allocate backoff\n", __func__
);
4035 backoff
->spgid
= m
->spgid
; /* struct */
4036 backoff
->id
= m
->id
;
4037 backoff
->begin
= m
->begin
;
4038 m
->begin
= NULL
; /* backoff now owns this */
4039 backoff
->end
= m
->end
;
4040 m
->end
= NULL
; /* ditto */
4042 insert_backoff(&spg
->backoffs
, backoff
);
4043 insert_backoff_by_id(&osd
->o_backoffs_by_id
, backoff
);
4046 * Ack with original backoff's epoch so that the OSD can
4047 * discard this if there was a PG split.
4049 msg
= create_backoff_message(backoff
, m
->map_epoch
);
4051 pr_err("%s failed to allocate msg\n", __func__
);
4054 ceph_con_send(&osd
->o_con
, msg
);
4057 static bool target_contained_by(const struct ceph_osd_request_target
*t
,
4058 const struct ceph_hobject_id
*begin
,
4059 const struct ceph_hobject_id
*end
)
4061 struct ceph_hobject_id hoid
;
4064 hoid_fill_from_target(&hoid
, t
);
4065 cmp
= hoid_compare(&hoid
, begin
);
4066 return !cmp
|| (cmp
> 0 && hoid_compare(&hoid
, end
) < 0);
static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct rb_node *n;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
	if (!backoff) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}

	if (hoid_compare(backoff->begin, m->begin) &&
	    hoid_compare(backoff->end, m->end)) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		/* unblock it anyway... */
	}

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
	BUG_ON(!spg);

	erase_backoff(&spg->backoffs, backoff);
	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
	free_backoff(backoff);

	if (RB_EMPTY_ROOT(&spg->backoffs)) {
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}

	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
			/*
			 * Match against @m, not @backoff -- the PG may
			 * have split on the OSD.
			 */
			if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}
static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct MOSDBackoff m;
	int ret;

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}
/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    list_first_entry_or_null(&msg->data,
						     struct ceph_msg_data,
						     links);

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
				} else {
					ceph_release_page_vector(data->pages,
					       calc_pages_for(0, data->length));
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);
/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
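/*
 * Illustrative synchronous I/O flow (hedged sketch; error handling is
 * elided and the request-setup helper takes more arguments in
 * practice):
 */
#if 0
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, ...);
	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);	/* ->r_result or error */
	ceph_osdc_put_request(req);
#endif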
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}
4397 struct ceph_osd_linger_request
*
4398 ceph_osdc_watch(struct ceph_osd_client
*osdc
,
4399 struct ceph_object_id
*oid
,
4400 struct ceph_object_locator
*oloc
,
4401 rados_watchcb2_t wcb
,
4402 rados_watcherrcb_t errcb
,
4405 struct ceph_osd_linger_request
*lreq
;
4408 lreq
= linger_alloc(osdc
);
4410 return ERR_PTR(-ENOMEM
);
4412 lreq
->is_watch
= true;
4414 lreq
->errcb
= errcb
;
4416 lreq
->watch_valid_thru
= jiffies
;
4418 ceph_oid_copy(&lreq
->t
.base_oid
, oid
);
4419 ceph_oloc_copy(&lreq
->t
.base_oloc
, oloc
);
4420 lreq
->t
.flags
= CEPH_OSD_FLAG_WRITE
;
4421 ktime_get_real_ts(&lreq
->mtime
);
4423 lreq
->reg_req
= alloc_linger_request(lreq
);
4424 if (!lreq
->reg_req
) {
4429 lreq
->ping_req
= alloc_linger_request(lreq
);
4430 if (!lreq
->ping_req
) {
4435 down_write(&osdc
->lock
);
4436 linger_register(lreq
); /* before osd_req_op_* */
4437 osd_req_op_watch_init(lreq
->reg_req
, 0, lreq
->linger_id
,
4438 CEPH_OSD_WATCH_OP_WATCH
);
4439 osd_req_op_watch_init(lreq
->ping_req
, 0, lreq
->linger_id
,
4440 CEPH_OSD_WATCH_OP_PING
);
4441 linger_submit(lreq
);
4442 up_write(&osdc
->lock
);
4444 ret
= linger_reg_commit_wait(lreq
);
4446 linger_cancel(lreq
);
4454 return ERR_PTR(ret
);
4456 EXPORT_SYMBOL(ceph_osdc_watch
);
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);
static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 size_t payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     size_t payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
				     timeout, payload, payload_len);
	if (ret) {
		linger_unregister(lreq);
		up_write(&osdc->lock);
		ceph_release_page_vector(pages, 1);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}
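/*
 * Illustrative sketch (not part of the original file): how a periodic
 * caller (e.g. rbd) might consume ceph_osdc_watch_check() -- a
 * negative return means the watch is dead and must be torn down and
 * re-established.  example_check_watch() is a hypothetical name.
 */
static void __maybe_unused example_check_watch(struct example_watch_ctx *ctx,
					       struct ceph_osd_linger_request *lreq)
{
	int ret = ceph_osdc_watch_check(ctx->osdc, lreq);

	if (ret < 0)
		ceph_osdc_unwatch(ctx->osdc, lreq);	/* then re-watch */
	else
		pr_debug("watch confirmed %d ms ago\n", ret);
}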
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_copy(p, &item->name, sizeof(item->name));
	item->cookie = ceph_decode_64(p);
	*p += 4; /* skip timeout_seconds */
	if (struct_v >= 2) {
		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
		ceph_decode_addr(&item->addr);
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr.in_addr));
	return 0;
}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}
/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_page)
		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
						   *resp_len, 0, false, false);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_page)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, 10, true, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, 10, true, "osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
				false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
int ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(ceph_osdc_setup);

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}
EXPORT_SYMBOL(ceph_osdc_cleanup);
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		return;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		return;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		return;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}
/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}
/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}
/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}
/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}
static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
};