// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}

#ifdef CONFIG_LOCKDEP
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                       u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        u32 xlen;

        /* object extent? */
        ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                      objoff, &xlen);
        *objlen = xlen;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
        return 0;
}
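/*
 * Editor's note -- illustrative example, not part of the original file.
 * With the default striping (stripe_count == 1, object_size == 4 MB), a
 * file extent off=6291456 (6 MB), *plen=4194304 (4 MB) maps to objnum=1,
 * objoff=2097152 and objlen=2097152: the extent crosses the object
 * boundary at 8 MB, so *plen is shortened to 2 MB and the caller has to
 * issue another request for the remainder.
 */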
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}
/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                                   struct ceph_bio_iter *bio_pos,
                                   u32 bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio_pos = *bio_pos;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
                                     struct ceph_bvec_iter *bvec_pos,
                                     u32 num_bvecs)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
        osd_data->bvec_pos = *bvec_pos;
        osd_data->num_bvecs = num_bvecs;
}
#define osd_req_op_data(oreq, whch, typ, fld)                           \
({                                                                      \
        struct ceph_osd_request *__oreq = (oreq);                       \
        unsigned int __whch = (whch);                                   \
        BUG_ON(__whch >= __oreq->r_num_ops);                            \
        &__oreq->r_ops[__whch].typ.fld;                                 \
})
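/*
 * Editor's note -- illustrative only: osd_req_op_data(req, 0, extent,
 * osd_data) expands to roughly
 *
 *      ({
 *              struct ceph_osd_request *__oreq = (req);
 *              unsigned int __whch = (0);
 *              BUG_ON(__whch >= __oreq->r_num_ops);
 *              &__oreq->r_ops[__whch].extent.osd_data;
 *      })
 *
 * i.e. it bounds-checks the op index and yields a pointer to the named
 * data field of that op.
 */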
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                           unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
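/*
 * Usage sketch (editor's illustration, not from the original file):
 * attach a freshly allocated page vector to a previously initialized
 * read op; @num_pages and @len are assumed to come from the caller.
 *
 *      struct page **pages;
 *
 *      pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *
 *      osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *                                       false, true);
 *
 * With own_pages == true the pages are released together with the
 * request (see ceph_osd_data_release()).
 */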
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                                    unsigned int which,
                                    struct ceph_bio_iter *bio_pos,
                                    u32 bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
                                      unsigned int which,
                                      struct bio_vec *bvecs, u32 num_bvecs,
                                      u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
                                         unsigned int which,
                                         struct ceph_bvec_iter *bvec_pos)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);
static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                 pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
                                       unsigned int which,
                                       struct bio_vec *bvecs, u32 num_bvecs,
                                       u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
        osd_req->r_ops[which].cls.indata_len += bytes;
        osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        case CEPH_OSD_DATA_TYPE_BVECS:
                return osd_data->bvec_pos.iter.bi_size;
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                           (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                ceph_pagelist_release(osd_data->pagelist);
        }
        ceph_osd_data_init(osd_data);
}
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                                    unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        case CEPH_OSD_OP_COPY_FROM:
                ceph_osd_data_release(&op->copy_from.osd_data);
                break;
        default:
                break;
        }
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->spgid = src->spgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;
        dest->recovery_deletes = src->recovery_deletes;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->epoch = src->epoch;
        dest->last_force_resend = src->last_force_resend;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_unsafe_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}
void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             kref_read(&req->r_kref));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);

        target_init(&req->r_t);
}
/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);

        WARN_ON(kref_read(&request_msg->kref) != 1);
        WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
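/*
 * Usage sketch (editor's illustration, not from the original file):
 * allocate a bare single-op request, fill in oid/oloc and the op, then
 * allocate the messages.  @osdc, @oid and @pool_id are assumed to come
 * from the caller.
 *
 *      struct ceph_osd_request *req;
 *      int ret;
 *
 *      req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *      if (!req)
 *              return -ENOMEM;
 *
 *      ceph_oid_copy(&req->r_base_oid, oid);
 *      req->r_base_oloc.pool = pool_id;
 *      osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 *      ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 */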
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
                                      int num_request_data_items,
                                      int num_reply_data_items)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(req->r_request || req->r_reply);
        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = CEPH_ENCODING_START_BLK_LEN +
                        CEPH_PGID_ENCODING_LEN + 1; /* spgid */
        msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        sizeof(struct ceph_osd_reqid); /* reqid */
        msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
        msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4 + 8; /* retry_attempt, features */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
                                       num_request_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
                                    num_request_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
                                       num_reply_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
                                    num_reply_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}
static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str) case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}

static void get_num_data_items(struct ceph_osd_request *req,
                               int *num_request_data_items,
                               int *num_reply_data_items)
{
        struct ceph_osd_req_op *op;

        *num_request_data_items = 0;
        *num_reply_data_items = 0;

        for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
                switch (op->op) {
                /* request */
                case CEPH_OSD_OP_WRITE:
                case CEPH_OSD_OP_WRITEFULL:
                case CEPH_OSD_OP_SETXATTR:
                case CEPH_OSD_OP_CMPXATTR:
                case CEPH_OSD_OP_NOTIFY_ACK:
                case CEPH_OSD_OP_COPY_FROM:
                        *num_request_data_items += 1;
                        break;

                /* reply */
                case CEPH_OSD_OP_STAT:
                case CEPH_OSD_OP_READ:
                case CEPH_OSD_OP_LIST_WATCHERS:
                        *num_reply_data_items += 1;
                        break;

                /* both */
                case CEPH_OSD_OP_NOTIFY:
                        *num_request_data_items += 1;
                        *num_reply_data_items += 1;
                        break;
                case CEPH_OSD_OP_CALL:
                        *num_request_data_items += 2;
                        *num_reply_data_items += 1;
                        break;

                default:
                        WARN_ON(!osd_req_opcode_valid(op->op));
                        break;
                }
        }
}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        int num_request_data_items, num_reply_data_items;

        get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
        return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
                                          num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                            unsigned int which, u16 opcode,
                            u64 offset, u64 length,
                            u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                              unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        const char *class, const char *method)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
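/*
 * Usage sketch (editor's illustration, not from the original file):
 * rbd uses this to invoke object class methods, e.g. "rbd"/"get_size".
 * The class/method strings go into request_info via the pagelist above;
 * method input is attached separately:
 *
 *      ret = osd_req_op_cls_init(req, 0, "rbd", "get_size");
 *      if (ret)
 *              return ret;
 *      osd_req_op_cls_request_data_pages(req, 0, pages, length, 0,
 *                                        false, false);
 */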
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ceph_pagelist_append(pagelist, name, payload_len);

        op->xattr.value_len = size;
        ceph_pagelist_append(pagelist, value, size);
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                   struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                                length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
                ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                        cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                        cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        case CEPH_OSD_OP_COPY_FROM:
                dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
                dst->copy_from.src_version =
                        cpu_to_le64(src->copy_from.src_version);
                dst->copy_from.flags = src->copy_from.flags;
                dst->copy_from.src_fadvise_flags =
                        cpu_to_le32(src->copy_from.src_fadvise_flags);
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                      GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        if (num_ops > 1)
                /*
                 * This is a special case for ceph_writepages_start(), but it
                 * also covers ceph_uninline_data().  If more multi-op request
                 * use cases emerge, we will need a separate helper.
                 */
                r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
        else
                r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
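/*
 * Usage sketch (editor's illustration, not from the original file):
 * read up to *plen bytes at @off from a file with the given layout.
 * The helper trims *plen at the object boundary and derives oid/oloc
 * from @layout and @vino.
 *
 *      u64 len = *plen;
 *      struct ceph_osd_request *req;
 *
 *      req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *                                  CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *                                  NULL, 0, 0, false);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 */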
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
                        int (*fn)(struct ceph_osd_request *req, void *arg),
                        void *arg)
{
        struct rb_node *n, *p;

        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

                for (p = rb_first(&osd->o_requests); p; ) {
                        struct ceph_osd_request *req =
                            rb_entry(p, struct ceph_osd_request, r_node);

                        p = rb_next(p);
                        if (fn(req, arg))
                                return;
                }
        }

        for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
                struct ceph_osd_request *req =
                    rb_entry(p, struct ceph_osd_request, r_node);

                p = rb_next(p);
                if (fn(req, arg))
                        return;
        }
}
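/*
 * Editor's illustration, not from the original file: a @fn for
 * for_each_request() returns 0 to keep iterating and non-zero to stop.
 * Assuming an abort_request() helper like the one defined later in this
 * file, aborting everything with a given error could look like:
 *
 *      static int example_abort_fn(struct ceph_osd_request *req, void *arg)
 *      {
 *              int err = *(int *)arg;
 *
 *              abort_request(req, err);
 *              return 0;
 *      }
 */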
static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
        refcount_set(&osd->o_ref, 1);
        RB_CLEAR_NODE(&osd->o_node);
        osd->o_requests = RB_ROOT;
        osd->o_linger_requests = RB_ROOT;
        osd->o_backoff_mappings = RB_ROOT;
        osd->o_backoffs_by_id = RB_ROOT;
        INIT_LIST_HEAD(&osd->o_osd_lru);
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        osd->o_incarnation = 1;
        mutex_init(&osd->lock);
}
static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (refcount_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
                     refcount_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
             refcount_read(&osd->o_ref) - 1);
        if (refcount_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }
        clear_backoffs(osd);

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}
static int reopen_osd(struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests)) {
                close_osd(osd);
                return -ENODEV;
        }

        peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
            !ceph_con_opened(&osd->o_con)) {
                struct rb_node *n;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
                        struct ceph_osd_request *req =
                            rb_entry(n, struct ceph_osd_request, r_node);
                        req->r_stamp = jiffies;
                }

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
                                          bool wrlocked)
{
        struct ceph_osd *osd;

        if (wrlocked)
                verify_osdc_wrlocked(osdc);
        else
                verify_osdc_locked(osdc);

        if (o != CEPH_HOMELESS_OSD)
                osd = lookup_osd(&osdc->osds, o);
        else
                osd = &osdc->homeless_osd;
        if (!osd) {
                if (!wrlocked)
                        return ERR_PTR(-EAGAIN);

                osd = create_osd(osdc, o);
                insert_osd(&osdc->osds, osd);
                ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
                              &osdc->osdmap->osd_addr[osd->o_osd]);
        }

        dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
        return osd;
}
/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(!req->r_tid || req->r_osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        if (!osd_homeless(osd))
                __remove_osd_from_lru(osd);
        else
                atomic_inc(&osd->o_osdc->num_homeless);

        get_osd(osd);
        insert_request(&osd->o_requests, req);
        req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(req->r_osd != osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        req->r_osd = NULL;
        erase_request(&osd->o_requests, req);
        put_osd(osd);

        if (!osd_homeless(osd))
                maybe_move_osd_to_lru(osd);
        else
                atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __pool_full(struct ceph_pg_pool_info *pi)
{
        return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
        struct rb_node *n;

        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
                struct ceph_pg_pool_info *pi =
                    rb_entry(n, struct ceph_pg_pool_info, node);

                if (__pool_full(pi))
                        return true;
        }

        return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
        struct ceph_pg_pool_info *pi;

        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
        if (!pi)
                return false;

        return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
{
        bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
        bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);

        WARN_ON(pi->id != t->target_oloc.pool);
        return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
               ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
               (osdc->osdmap->epoch < osdc->epoch_barrier);
}
enum calc_target_result {
        CALC_TARGET_NO_ACTION = 0,
        CALC_TARGET_NEED_RESEND,
        CALC_TARGET_POOL_DNE,
};
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
                                           struct ceph_osd_request_target *t,
                                           struct ceph_connection *con,
                                           bool any_change)
{
        struct ceph_pg_pool_info *pi;
        struct ceph_pg pgid, last_pgid;
        struct ceph_osds up, acting;
        bool force_resend = false;
        bool unpaused = false;
        bool legacy_change;
        bool split = false;
        bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
        bool recovery_deletes = ceph_osdmap_flag(osdc,
                                                 CEPH_OSDMAP_RECOVERY_DELETES);
        enum calc_target_result ct_res;

        t->epoch = osdc->osdmap->epoch;
        pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
        if (!pi) {
                t->osd = CEPH_HOMELESS_OSD;
                ct_res = CALC_TARGET_POOL_DNE;
                goto out;
        }

        if (osdc->osdmap->epoch == pi->last_force_request_resend) {
                if (t->last_force_resend < pi->last_force_request_resend) {
                        t->last_force_resend = pi->last_force_request_resend;
                        force_resend = true;
                } else if (t->last_force_resend == 0) {
                        force_resend = true;
                }
        }

        /* apply tiering */
        ceph_oid_copy(&t->target_oid, &t->base_oid);
        ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
        if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
                if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
                        t->target_oloc.pool = pi->read_tier;
                if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
                        t->target_oloc.pool = pi->write_tier;

                pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
                if (!pi) {
                        t->osd = CEPH_HOMELESS_OSD;
                        ct_res = CALC_TARGET_POOL_DNE;
                        goto out;
                }
        }

        __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
        last_pgid.pool = pgid.pool;
        last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

        ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
        if (any_change &&
            ceph_is_new_interval(&t->acting,
                                 &acting,
                                 &t->up,
                                 &up,
                                 t->size,
                                 pi->size,
                                 t->min_size,
                                 pi->min_size,
                                 t->pg_num,
                                 pi->pg_num,
                                 t->sort_bitwise,
                                 sort_bitwise,
                                 t->recovery_deletes,
                                 recovery_deletes,
                                 &last_pgid))
                force_resend = true;

        if (t->paused && !target_should_be_paused(osdc, t, pi)) {
                t->paused = false;
                unpaused = true;
        }
        legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
                        ceph_osds_changed(&t->acting, &acting, any_change);
        if (t->pg_num)
                split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

        if (legacy_change || force_resend || split) {
                t->pgid = pgid; /* struct */
                ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
                ceph_osds_copy(&t->acting, &acting);
                ceph_osds_copy(&t->up, &up);
                t->size = pi->size;
                t->min_size = pi->min_size;
                t->pg_num = pi->pg_num;
                t->pg_num_mask = pi->pg_num_mask;
                t->sort_bitwise = sort_bitwise;
                t->recovery_deletes = recovery_deletes;
                t->osd = acting.primary;
        }

        if (unpaused || legacy_change || force_resend ||
            (split && con && CEPH_HAVE_FEATURE(con->peer_features,
                                               RESEND_ON_SPLIT)))
                ct_res = CALC_TARGET_NEED_RESEND;
        else
                ct_res = CALC_TARGET_NO_ACTION;

out:
        dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
        return ct_res;
}
static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
        struct ceph_spg_mapping *spg;

        spg = kmalloc(sizeof(*spg), GFP_NOIO);
        if (!spg)
                return NULL;

        RB_CLEAR_NODE(&spg->node);
        spg->backoffs = RB_ROOT;
        return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
        WARN_ON(!RB_EMPTY_NODE(&spg->node));
        WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

        kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
                 RB_BYPTR, const struct ceph_spg *, node)
static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
        return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
                                   void **pkey, size_t *pkey_len)
{
        if (hoid->key_len) {
                *pkey = hoid->key;
                *pkey_len = hoid->key_len;
        } else {
                *pkey = hoid->oid;
                *pkey_len = hoid->oid_len;
        }
}

static int compare_names(const void *name1, size_t name1_len,
                         const void *name2, size_t name2_len)
{
        int ret;

        ret = memcmp(name1, name2, min(name1_len, name2_len));
        if (!ret) {
                if (name1_len < name2_len)
                        ret = -1;
                else if (name1_len > name2_len)
                        ret = 1;
        }
        return ret;
}
static int hoid_compare(const struct ceph_hobject_id *lhs,
                        const struct ceph_hobject_id *rhs)
{
        void *effective_key1, *effective_key2;
        size_t effective_key1_len, effective_key2_len;
        int ret;

        if (lhs->is_max < rhs->is_max)
                return -1;
        if (lhs->is_max > rhs->is_max)
                return 1;

        if (lhs->pool < rhs->pool)
                return -1;
        if (lhs->pool > rhs->pool)
                return 1;

        if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
                return -1;
        if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
                return 1;

        ret = compare_names(lhs->nspace, lhs->nspace_len,
                            rhs->nspace, rhs->nspace_len);
        if (ret)
                return ret;

        hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
        hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
        ret = compare_names(effective_key1, effective_key1_len,
                            effective_key2, effective_key2_len);
        if (ret)
                return ret;

        ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
        if (ret)
                return ret;

        if (lhs->snapid < rhs->snapid)
                return -1;
        if (lhs->snapid > rhs->snapid)
                return 1;

        return 0;
}
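/*
 * Editor's note -- illustrative only: the resulting sort order is
 * (is_max, pool, bitwise hash key, nspace, effective key, oid, snapid),
 * matching hobject_t's bitwise comparator on the OSD side.  E.g. two
 * objects in the same pool and namespace are ordered by their
 * reversed-bits hash before their names are ever compared.
 */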
/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
        u8 struct_v;
        u32 struct_len;
        int ret;

        ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
                                  &struct_len);
        if (ret)
                return ret;

        if (struct_v < 4) {
                pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
                goto e_inval;
        }

        hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
                                                GFP_NOIO);
        if (IS_ERR(hoid->key)) {
                ret = PTR_ERR(hoid->key);
                hoid->key = NULL;
                return ret;
        }

        hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
                                                GFP_NOIO);
        if (IS_ERR(hoid->oid)) {
                ret = PTR_ERR(hoid->oid);
                hoid->oid = NULL;
                return ret;
        }

        ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
        ceph_decode_32_safe(p, end, hoid->hash, e_inval);
        ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

        hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
                                                   GFP_NOIO);
        if (IS_ERR(hoid->nspace)) {
                ret = PTR_ERR(hoid->nspace);
                hoid->nspace = NULL;
                return ret;
        }

        ceph_decode_64_safe(p, end, hoid->pool, e_inval);

        ceph_hoid_build_hash_cache(hoid);
        return 0;

e_inval:
        return -EINVAL;
}
static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
        return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
               4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
        ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
        ceph_encode_string(p, end, hoid->key, hoid->key_len);
        ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
        ceph_encode_64(p, hoid->snapid);
        ceph_encode_32(p, hoid->hash);
        ceph_encode_8(p, hoid->is_max);
        ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
        ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
        if (hoid) {
                kfree(hoid->key);
                kfree(hoid->oid);
                kfree(hoid->nspace);
                kfree(hoid);
        }
}
static struct ceph_osd_backoff *alloc_backoff(void)
{
        struct ceph_osd_backoff *backoff;

        backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
        if (!backoff)
                return NULL;

        RB_CLEAR_NODE(&backoff->spg_node);
        RB_CLEAR_NODE(&backoff->id_node);
        return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
        WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
        WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

        free_hoid(backoff->begin);
        free_hoid(backoff->end);
        kfree(backoff);
}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
                        RB_BYVAL, spg_node);
static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
                                        const struct ceph_hobject_id *hoid)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct ceph_osd_backoff *cur =
                    rb_entry(n, struct ceph_osd_backoff, spg_node);
                int cmp;

                cmp = hoid_compare(hoid, cur->begin);
                if (cmp < 0) {
                        n = n->rb_left;
                } else if (cmp > 0) {
                        if (hoid_compare(hoid, cur->end) < 0)
                                return cur;

                        n = n->rb_right;
                } else {
                        return cur;
                }
        }

        return NULL;
}

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
static void clear_backoffs(struct ceph_osd *osd)
{
        while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
                struct ceph_spg_mapping *spg =
                    rb_entry(rb_first(&osd->o_backoff_mappings),
                             struct ceph_spg_mapping, node);

                while (!RB_EMPTY_ROOT(&spg->backoffs)) {
                        struct ceph_osd_backoff *backoff =
                            rb_entry(rb_first(&spg->backoffs),
                                     struct ceph_osd_backoff, spg_node);

                        erase_backoff(&spg->backoffs, backoff);
                        erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
                        free_backoff(backoff);
                }
                erase_spg_mapping(&osd->o_backoff_mappings, spg);
                free_spg_mapping(spg);
        }
}
/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
                                  const struct ceph_osd_request_target *t)
{
        hoid->key = NULL;
        hoid->key_len = 0;
        hoid->oid = t->target_oid.name;
        hoid->oid_len = t->target_oid.name_len;
        hoid->snapid = CEPH_NOSNAP;
        hoid->hash = t->pgid.seed;
        hoid->is_max = false;
        if (t->target_oloc.pool_ns) {
                hoid->nspace = t->target_oloc.pool_ns->str;
                hoid->nspace_len = t->target_oloc.pool_ns->len;
        } else {
                hoid->nspace = NULL;
                hoid->nspace_len = 0;
        }
        hoid->pool = t->target_oloc.pool;
        ceph_hoid_build_hash_cache(hoid);
}
static bool should_plug_request(struct ceph_osd_request *req)
{
        struct ceph_osd *osd = req->r_osd;
        struct ceph_spg_mapping *spg;
        struct ceph_osd_backoff *backoff;
        struct ceph_hobject_id hoid;

        spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
        if (!spg)
                return false;

        hoid_fill_from_target(&hoid, &req->r_t);
        backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
        if (!backoff)
                return false;

        dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
             __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
             backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
        return true;
}
/*
 * Keep get_num_data_items() in sync with this function.
 */
static void setup_request_data(struct ceph_osd_request *req)
{
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;
        struct ceph_osd_req_op *op;

        if (req->r_request->num_data_items || req->r_reply->num_data_items)
                return;

        WARN_ON(request_msg->data_length || reply_msg->data_length);
        for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
                switch (op->op) {
                /* request */
                case CEPH_OSD_OP_WRITE:
                case CEPH_OSD_OP_WRITEFULL:
                        WARN_ON(op->indata_len != op->extent.length);
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->extent.osd_data);
                        break;
                case CEPH_OSD_OP_SETXATTR:
                case CEPH_OSD_OP_CMPXATTR:
                        WARN_ON(op->indata_len != op->xattr.name_len +
                                                  op->xattr.value_len);
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->xattr.osd_data);
                        break;
                case CEPH_OSD_OP_NOTIFY_ACK:
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->notify_ack.request_data);
                        break;
                case CEPH_OSD_OP_COPY_FROM:
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->copy_from.osd_data);
                        break;

                /* reply */
                case CEPH_OSD_OP_STAT:
                        ceph_osdc_msg_data_add(reply_msg,
                                               &op->raw_data_in);
                        break;
                case CEPH_OSD_OP_READ:
                        ceph_osdc_msg_data_add(reply_msg,
                                               &op->extent.osd_data);
                        break;
                case CEPH_OSD_OP_LIST_WATCHERS:
                        ceph_osdc_msg_data_add(reply_msg,
                                               &op->list_watchers.response_data);
                        break;

                /* both */
                case CEPH_OSD_OP_CALL:
                        WARN_ON(op->indata_len != op->cls.class_len +
                                                  op->cls.method_len +
                                                  op->cls.indata_len);
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->cls.request_info);
                        /* optional, can be NONE */
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->cls.request_data);
                        /* optional, can be NONE */
                        ceph_osdc_msg_data_add(reply_msg,
                                               &op->cls.response_data);
                        break;
                case CEPH_OSD_OP_NOTIFY:
                        ceph_osdc_msg_data_add(request_msg,
                                               &op->notify.request_data);
                        ceph_osdc_msg_data_add(reply_msg,
                                               &op->notify.response_data);
                        break;
                }
        }
}
static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
        ceph_encode_8(p, 1);
        ceph_encode_64(p, pgid->pool);
        ceph_encode_32(p, pgid->seed);
        ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
        ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
        encode_pgid(p, &spgid->pgid);
        ceph_encode_8(p, spgid->shard);
}

static void encode_oloc(void **p, void *end,
                        const struct ceph_object_locator *oloc)
{
        ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
        ceph_encode_64(p, oloc->pool);
        ceph_encode_32(p, -1); /* preferred */
        ceph_encode_32(p, 0);  /* key len */
        if (oloc->pool_ns)
                ceph_encode_string(p, end, oloc->pool_ns->str,
                                   oloc->pool_ns->len);
        else
                ceph_encode_32(p, 0);
}
static void encode_request_partial(struct ceph_osd_request *req,
                                   struct ceph_msg *msg)
{
        void *p = msg->front.iov_base;
        void *const end = p + msg->front_alloc_len;
        u32 data_len = 0;
        int i;

        if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
                /* snapshots aren't writeable */
                WARN_ON(req->r_snapid != CEPH_NOSNAP);
        } else {
                WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
                        req->r_data_offset || req->r_snapc);
        }

        setup_request_data(req);

        encode_spgid(&p, &req->r_t.spgid); /* actual spg */
        ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
        ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
        ceph_encode_32(&p, req->r_flags);

        /* reqid */
        ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
        memset(p, 0, sizeof(struct ceph_osd_reqid));
        p += sizeof(struct ceph_osd_reqid);

        /* trace */
        memset(p, 0, sizeof(struct ceph_blkin_trace_info));
        p += sizeof(struct ceph_blkin_trace_info);

        ceph_encode_32(&p, 0); /* client_inc, always 0 */
        ceph_encode_timespec64(p, &req->r_mtime);
        p += sizeof(struct ceph_timespec);

        encode_oloc(&p, end, &req->r_t.target_oloc);
        ceph_encode_string(&p, end, req->r_t.target_oid.name,
                           req->r_t.target_oid.name_len);

        /* ops, can imply data */
        ceph_encode_16(&p, req->r_num_ops);
        for (i = 0; i < req->r_num_ops; i++) {
                data_len += osd_req_encode_op(p, &req->r_ops[i]);
                p += sizeof(struct ceph_osd_op);
        }

        ceph_encode_64(&p, req->r_snapid); /* snapid */
        if (req->r_snapc) {
                ceph_encode_64(&p, req->r_snapc->seq);
                ceph_encode_32(&p, req->r_snapc->num_snaps);
                for (i = 0; i < req->r_snapc->num_snaps; i++)
                        ceph_encode_64(&p, req->r_snapc->snaps[i]);
        } else {
                ceph_encode_64(&p, 0); /* snap_seq */
                ceph_encode_32(&p, 0); /* snaps len */
        }

        ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
        BUG_ON(p > end - 8); /* space for features */

        msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
        /* front_len is finalized in encode_request_finish() */
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
        msg->hdr.data_len = cpu_to_le32(data_len);
        /*
         * The header "data_off" is a hint to the receiver allowing it
         * to align received data into its buffers such that there's no
         * need to re-copy it before writing it to disk (direct I/O).
         */
        msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

        dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
             req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
static void encode_request_finish(struct ceph_msg *msg)
{
        void *p = msg->front.iov_base;
        void *const partial_end = p + msg->front.iov_len;
        void *const end = p + msg->front_alloc_len;

        if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
                /* luminous OSD -- encode features and be done */
                p = partial_end;
                ceph_encode_64(&p, msg->con->peer_features);
        } else {
                struct {
                        char spgid[CEPH_ENCODING_START_BLK_LEN +
                                   CEPH_PGID_ENCODING_LEN + 1];
                        __le32 hash;
                        __le32 epoch;
                        __le32 flags;
                        char reqid[CEPH_ENCODING_START_BLK_LEN +
                                   sizeof(struct ceph_osd_reqid)];
                        char trace[sizeof(struct ceph_blkin_trace_info)];
                        __le32 client_inc;
                        struct ceph_timespec mtime;
                } __packed head;
                struct ceph_pg pgid;
                void *oloc, *oid, *tail;
                int oloc_len, oid_len, tail_len;
                int len;

                /*
                 * Pre-luminous OSD -- reencode v8 into v4 using @head
                 * as a temporary buffer.  Encode the raw PG; the rest
                 * is just a matter of moving oloc, oid and tail blobs
                 * around.
                 */
                memcpy(&head, p, sizeof(head));
                p += sizeof(head);

                oloc = p;
                p += CEPH_ENCODING_START_BLK_LEN;
                pgid.pool = ceph_decode_64(&p);
                p += 4 + 4; /* preferred, key len */
                len = ceph_decode_32(&p);
                p += len;   /* nspace */
                oloc_len = p - oloc;

                oid = p;
                len = ceph_decode_32(&p);
                p += len;
                oid_len = p - oid;

                tail = p;
                tail_len = partial_end - p;

                p = msg->front.iov_base;
                ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
                ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
                ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
                ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));

                /* reassert_version */
                memset(p, 0, sizeof(struct ceph_eversion));
                p += sizeof(struct ceph_eversion);

                /* oloc */
                memmove(p, oloc, oloc_len);
                p += oloc_len;

                pgid.seed = le32_to_cpu(head.hash);
                encode_pgid(&p, &pgid); /* raw pg */

                /* oid */
                memmove(p, oid, oid_len);
                p += oid_len;

                /* tail -- ops, snapid, snapc, retry_attempt */
                memmove(p, tail, tail_len);
                p += tail_len;

                msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
        }

        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

        dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
             le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
             le16_to_cpu(msg->hdr.version));
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/* backoff? */
	if (should_plug_request(req))
		return;

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request_partial(req, req->r_request);

	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
	     req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}

static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}

static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);

static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	enum calc_target_result ct_res;
	int err = 0;
	bool need_send = false;
	bool promoted = false;

	WARN_ON(req->r_tid);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	ct_res = calc_target(osdc, &req->r_t, NULL, false);
	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
		goto promote;

	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if (osdc->abort_err) {
		dout("req %p abort_err %d\n", req, osdc->abort_err);
		err = osdc->abort_err;
	} else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
		     osdc->epoch_barrier);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
			err = -ENOSPC;
		} else {
			pr_warn_ratelimited("FULL or reached pool quota\n");
			req->r_t.paused = true;
			maybe_request_map(osdc);
		}
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(osd, req);
	if (need_send)
		send_request(req);
	else if (err)
		complete_request(req, err);
	mutex_unlock(&osd->lock);

	if (!err && ct_res == CALC_TARGET_POOL_DNE)
		send_map_check(req);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}

static void account_request(struct ceph_osd_request *req)
{
	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));

	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
	atomic_inc(&req->r_osdc->num_requests);

	req->r_start_stamp = jiffies;
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}

static void finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	if (req->r_osd)
		unlink_request(req->r_osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}

static void __complete_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
	     req->r_tid, req->r_callback, req->r_result);

	if (req->r_callback)
		req->r_callback(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}

static void complete_request_workfn(struct work_struct *work)
{
	struct ceph_osd_request *req =
	    container_of(work, struct ceph_osd_request, r_complete_work);

	__complete_request(req);
}

/*
 * This is open-coded in handle_reply().
 */
static void complete_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	req->r_result = err;
	finish_request(req);

	INIT_WORK(&req->r_complete_work, complete_request_workfn);
	queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
}

static void cancel_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
	erase_request_mc(&osdc->map_checks, req);
	ceph_osdc_put_request(req);
}

static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	cancel_map_check(req);
	finish_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}

static void abort_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	cancel_map_check(req);
	complete_request(req, err);
}

static int abort_fn(struct ceph_osd_request *req, void *arg)
{
	int err = *(int *)arg;

	abort_request(req, err);
	return 0; /* continue iteration */
}

/*
 * Abort all in-flight requests with @err and arrange for all future
 * requests to be failed immediately.
 */
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
{
	dout("%s osdc %p err %d\n", __func__, osdc, err);
	down_write(&osdc->lock);
	for_each_request(osdc, abort_fn, &err);
	osdc->abort_err = err;
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_abort_requests);

static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	if (likely(eb > osdc->epoch_barrier)) {
		dout("updating epoch_barrier from %u to %u\n",
		     osdc->epoch_barrier, eb);
		osdc->epoch_barrier = eb;
		/* Request map if we're not to the barrier yet */
		if (eb > osdc->osdmap->epoch)
			maybe_request_map(osdc);
	}
}

void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	down_read(&osdc->lock);
	if (unlikely(eb > osdc->epoch_barrier)) {
		up_read(&osdc->lock);
		down_write(&osdc->lock);
		update_epoch_barrier(osdc, eb);
		up_write(&osdc->lock);
	} else {
		up_read(&osdc->lock);
	}
}
EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);

/*
 * We can end up releasing caps as a result of abort_request().
 * In that case, we probably want to ensure that the cap release message
 * has an updated epoch barrier in it, so set the epoch barrier prior to
 * aborting the first request.
 */
static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool *victims = arg;

	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
	    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	     pool_full(osdc, req->r_t.base_oloc.pool))) {
		if (!*victims) {
			update_epoch_barrier(osdc, osdc->osdmap->epoch);
			*victims = true;
		}
		abort_request(req, -ENOSPC);
	}

	return 0; /* continue iteration */
}

/*
 * Drop all pending requests that are stalled waiting on a full condition to
 * clear, and complete them with ENOSPC as the return code. Set the
 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
 * cancelled.
 */
static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{
	bool victims = false;

	if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
	    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
		for_each_request(osdc, abort_on_full_fn, &victims);
}

static void check_pool_dne(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (req->r_attempts) {
		/*
		 * We sent a request earlier, which means that
		 * previously the pool existed, and now it does not
		 * (i.e., it was deleted).
		 */
		req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
	}

	if (req->r_map_dne_bound) {
		if (map->epoch >= req->r_map_dne_bound) {
			/* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}

static void map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_request *req;
	u64 tid = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}

	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
	if (!req->r_map_dne_bound)
		req->r_map_dne_bound = greq->u.newest;
	erase_request_mc(&osdc->map_checks, req);
	check_pool_dne(req);

	ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}

static void send_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
	WARN_ON(ret);
}

/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}

static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	RB_CLEAR_NODE(&lreq->mc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	INIT_LIST_HEAD(&lreq->pending_lworks);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}

DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)

/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}

static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);

	erase_linger_osdc(&osdc->linger_requests, lreq);
	linger_put(lreq);
}

static void cancel_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}

struct linger_work {
	struct work_struct work;
	struct ceph_osd_linger_request *lreq;
	struct list_head pending_item;
	unsigned long queued_stamp;

	union {
		struct {
			u64 notify_id;
			u64 notifier_id;
			void *payload; /* points into @msg front */
			size_t payload_len;

			struct ceph_msg *msg; /* for ceph_msg_put() */
		} notify;
		struct {
			int err;
		} error;
	};
};

static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
				       work_func_t workfn)
{
	struct linger_work *lwork;

	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
	if (!lwork)
		return NULL;

	INIT_WORK(&lwork->work, workfn);
	INIT_LIST_HEAD(&lwork->pending_item);
	lwork->lreq = linger_get(lreq);

	return lwork;
}

static void lwork_free(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	mutex_lock(&lreq->lock);
	list_del(&lwork->pending_item);
	mutex_unlock(&lreq->lock);

	linger_put(lreq);
	kfree(lwork);
}

static void lwork_queue(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_lreq_locked(lreq);
	WARN_ON(!list_empty(&lwork->pending_item));

	lwork->queued_stamp = jiffies;
	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
	queue_work(osdc->notify_wq, &lwork->work);
}

static void do_watch_notify(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	WARN_ON(!lreq->is_watch);
	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
	     lwork->notify.payload_len);
	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
		  lwork->notify.notifier_id, lwork->notify.payload,
		  lwork->notify.payload_len);

out:
	ceph_msg_put(lwork->notify.msg);
	lwork_free(lwork);
}

static void do_watch_error(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);

out:
	lwork_free(lwork);
}

static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{
	struct linger_work *lwork;

	lwork = lwork_alloc(lreq, do_watch_error);
	if (!lwork) {
		pr_err("failed to allocate error-lwork\n");
		return;
	}

	lwork->error.err = lreq->last_error;
	lwork_queue(lwork);
}

static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
				       int result)
{
	if (!completion_done(&lreq->reg_commit_wait)) {
		lreq->reg_commit_error = (result <= 0 ? result : 0);
		complete_all(&lreq->reg_commit_wait);
	}
}

static void linger_commit_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
	     lreq->linger_id, req->r_result);
	linger_reg_commit_complete(lreq, req->r_result);
	lreq->committed = true;

	if (!lreq->is_watch) {
		struct ceph_osd_data *osd_data =
		    osd_req_op_data(req, 0, notify, response_data);
		void *p = page_address(osd_data->pages[0]);

		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);

		/* make note of the notify_id */
		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
			lreq->notify_id = ceph_decode_64(&p);
			dout("lreq %p notify_id %llu\n", lreq,
			     lreq->notify_id);
		} else {
			dout("lreq %p no notify_id\n", lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}

static void linger_reconnect_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
	if (req->r_result < 0) {
		if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static void send_linger(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req = lreq->reg_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	verify_osdc_wrlocked(req->r_osdc);
	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = lreq->t.flags;
	req->r_mtime = lreq->mtime;

	mutex_lock(&lreq->lock);
	if (lreq->is_watch && lreq->committed) {
		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
			op->watch.cookie != lreq->linger_id);
		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
		op->watch.gen = ++lreq->register_gen;
		dout("lreq %p reconnect register_gen %u\n", lreq,
		     op->watch.gen);
		req->r_callback = linger_reconnect_cb;
	} else {
		if (!lreq->is_watch)
			lreq->notify_id = 0;
		else
			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
		dout("lreq %p register\n", lreq);
		req->r_callback = linger_commit_cb;
	}
	mutex_unlock(&lreq->lock);

	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	submit_request(req, true);
}

static void linger_ping_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
	     lreq->last_error);
	if (lreq->register_gen == req->r_ops[0].watch.gen) {
		if (!req->r_result) {
			lreq->watch_valid_thru = lreq->ping_sent;
		} else if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	} else {
		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
		     lreq->register_gen, req->r_ops[0].watch.gen);
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_request *req = lreq->ping_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("%s PAUSERD\n", __func__);
		return;
	}

	lreq->ping_sent = jiffies;
	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
	     lreq->register_gen);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	target_copy(&req->r_t, &lreq->t);

	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
		op->watch.cookie != lreq->linger_id ||
		op->watch.op != CEPH_OSD_WATCH_OP_PING);
	op->watch.gen = lreq->register_gen;
	req->r_callback = linger_ping_cb;
	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	ceph_osdc_get_request(req);
	account_request(req);
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(lreq->osd, req);
	send_request(req);
}

static void linger_submit(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd *osd;

	down_write(&osdc->lock);
	linger_register(lreq);
	if (lreq->is_watch) {
		lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
		lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
	} else {
		lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
	}

	calc_target(osdc, &lreq->t, NULL, false);
	osd = lookup_create_osd(osdc, lreq->t.osd, true);
	link_linger(osd, lreq);

	send_linger(lreq);
	up_write(&osdc->lock);
}

static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (!lookup_lreq)
		return;

	WARN_ON(lookup_lreq != lreq);
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	linger_put(lreq);
}

/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
	if (lreq->is_watch && lreq->ping_req->r_osd)
		cancel_linger_request(lreq->ping_req);
	if (lreq->reg_req->r_osd)
		cancel_linger_request(lreq->reg_req);
	cancel_linger_map_check(lreq);
	unlink_linger(lreq->osd, lreq);
	linger_unregister(lreq);
}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	down_write(&osdc->lock);
	if (__linger_registered(lreq))
		__linger_cancel(lreq);
	up_write(&osdc->lock);
}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq);

static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (lreq->register_gen) {
		lreq->map_dne_bound = map->epoch;
		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
		     lreq, lreq->linger_id);
	} else {
		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
		     map->epoch);
	}

	if (lreq->map_dne_bound) {
		if (map->epoch >= lreq->map_dne_bound) {
			/* we had a new enough map */
			pr_info("linger_id %llu pool does not exist\n",
				lreq->linger_id);
			linger_reg_commit_complete(lreq, -ENOENT);
			__linger_cancel(lreq);
		}
	} else {
		send_linger_map_check(lreq);
	}
}

static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_linger_request *lreq;
	u64 linger_id = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
	if (!lreq) {
		dout("%s linger_id %llu dne\n", __func__, linger_id);
		goto out_unlock;
	}

	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
	     greq->u.newest);
	if (!lreq->map_dne_bound)
		lreq->map_dne_bound = greq->u.newest;
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	check_linger_pool_dne(lreq);

	linger_put(lreq);
out_unlock:
	up_write(&osdc->lock);
}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (lookup_lreq) {
		WARN_ON(lookup_lreq != lreq);
		return;
	}

	linger_get(lreq);
	insert_linger_mc(&osdc->linger_map_checks, lreq);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  linger_map_check_cb, lreq->linger_id);
	WARN_ON(ret);
}

static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
	return ret ?: lreq->reg_commit_error;
}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
	return ret ?: lreq->notify_finish_error;
}

/*
 * Timeout callback, called every N seconds.  When 1 or more OSD
 * requests has been active for more than N seconds, we send a keepalive
 * (tag + timestamp) to its OSD to ensure any communications channel
 * reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_options *opts = osdc->client->options;
	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
	LIST_HEAD(slow_osds);
	struct rb_node *n, *p;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		bool found = false;

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_stamp, cutoff)) {
				dout(" req %p tid %llu on osd%d is laggy\n",
				     req, req->r_tid, osd->o_osd);
				found = true;
			}
			if (opts->osd_request_timeout &&
			    time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
				       req->r_tid, osd->o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
			struct ceph_osd_linger_request *lreq =
			    rb_entry(p, struct ceph_osd_linger_request, node);

			dout(" lreq %p linger_id %llu is served by osd%d\n",
			     lreq, lreq->linger_id, osd->o_osd);
			found = true;

			mutex_lock(&lreq->lock);
			if (lreq->is_watch && lreq->committed && !lreq->last_error)
				send_linger_ping(lreq);
			mutex_unlock(&lreq->lock);
		}

		if (found)
			list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}

	if (opts->osd_request_timeout) {
		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
				       req->r_tid, osdc->homeless_osd.o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
	}

	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
		maybe_request_map(osdc);

	while (!list_empty(&slow_osds)) {
		struct ceph_osd *osd = list_first_entry(&slow_osds,
							struct ceph_osd,
							o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
	struct ceph_osd *osd, *nosd;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;

		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
		close_osd(osd);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

static int ceph_oloc_decode(void **p, void *end,
			    struct ceph_object_locator *oloc)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret = 0;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_v < 3) {
		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	if (struct_cv > 6) {
		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}

	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	oloc->pool = ceph_decode_64(p);
	*p += 4; /* skip preferred */

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_object_locator::key is set\n");
		goto e_inval;
	}

	if (struct_v >= 5) {
		bool changed = false;

		len = ceph_decode_32(p);
		if (len > 0) {
			ceph_decode_need(p, end, len, e_inval);
			if (!oloc->pool_ns ||
			    ceph_compare_string(oloc->pool_ns, *p, len))
				changed = true;
			*p += len;
		} else {
			if (oloc->pool_ns)
				changed = true;
		}
		if (changed) {
			/* redirect changes namespace */
			pr_warn("ceph_object_locator::nspace is changed\n");
			goto e_inval;
		}
	}

	if (struct_v >= 6) {
		s64 hash = ceph_decode_64(p);

		if (hash != -1) {
			pr_warn("ceph_object_locator::hash is set\n");
			goto e_inval;
		}
	}

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

static int ceph_redirect_decode(void **p, void *end,
				struct ceph_request_redirect *redir)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_cv > 1) {
		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
			struct_v, struct_cv);
		goto e_inval;
	}

	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	ret = ceph_oloc_decode(p, end, &redir->oloc);
	if (ret)
		goto out;

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_request_redirect::object_name is set\n");
		goto e_inval;
	}

	len = ceph_decode_32(p);
	*p += len; /* skip osd_instructions */

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

struct MOSDOpReply {
	struct ceph_pg pgid;
	u64 flags;
	int result;
	u32 epoch;
	int num_ops;
	u32 outdata_len[CEPH_OSD_MAX_OPS];
	s32 rval[CEPH_OSD_MAX_OPS];
	int retry_attempt;
	struct ceph_eversion replay_version;
	u64 user_version;
	struct ceph_request_redirect redirect;
};

static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u16 version = le16_to_cpu(msg->hdr.version);
	struct ceph_eversion bad_replay_version;
	u8 decode_redir;
	u32 len;
	int ret;
	int i;

	ceph_decode_32_safe(&p, end, len, e_inval);
	ceph_decode_need(&p, end, len, e_inval);
	p += len; /* skip oid */

	ret = ceph_decode_pgid(&p, end, &m->pgid);
	if (ret)
		return ret;

	ceph_decode_64_safe(&p, end, m->flags, e_inval);
	ceph_decode_32_safe(&p, end, m->result, e_inval);
	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
	p += sizeof(bad_replay_version);
	ceph_decode_32_safe(&p, end, m->epoch, e_inval);

	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
		goto e_inval;

	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
			 e_inval);
	for (i = 0; i < m->num_ops; i++) {
		struct ceph_osd_op *op = p;

		m->outdata_len[i] = le32_to_cpu(op->payload_len);
		p += sizeof(*op);
	}

	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
	for (i = 0; i < m->num_ops; i++)
		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);

	if (version >= 5) {
		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
		memcpy(&m->replay_version, p, sizeof(m->replay_version));
		p += sizeof(m->replay_version);
		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
	} else {
		m->replay_version = bad_replay_version; /* struct */
		m->user_version = le64_to_cpu(m->replay_version.version);
	}

	if (version >= 6) {
		if (version >= 7)
			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
		else
			decode_redir = 1;
	} else {
		decode_redir = 0;
	}

	if (decode_redir) {
		ret = ceph_redirect_decode(&p, end, &m->redirect);
		if (ret)
			return ret;
	} else {
		ceph_oloc_init(&m->redirect.oloc);
	}

	return 0;

e_inval:
	return -EINVAL;
}

/*
 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
 * specified.
 */
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_osd_request *req;
	struct MOSDOpReply m;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 data_len = 0;
	int ret;
	int i;

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
		goto out_unlock_session;
	}

	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
	ret = decode_MOSDOpReply(msg, &m);
	m.redirect.oloc.pool_ns = NULL;
	if (ret) {
		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
		       req->r_tid, ret);
		ceph_msg_dump(msg);
		goto fail_request;
	}
	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
	     le64_to_cpu(m.replay_version.version), m.user_version);

	if (m.retry_attempt >= 0) {
		if (m.retry_attempt != req->r_attempts - 1) {
			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
			     req, req->r_tid, m.retry_attempt,
			     req->r_attempts - 1);
			goto out_unlock_session;
		}
	} else {
		WARN_ON(1); /* MOSDOpReply v4 is assumed */
	}

	if (!ceph_oloc_empty(&m.redirect.oloc)) {
		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
		     m.redirect.oloc.pool);
		unlink_request(osd, req);
		mutex_unlock(&osd->lock);

		/*
		 * Not ceph_oloc_copy() - changing pool_ns is not
		 * supported.
		 */
		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
		req->r_tid = 0;
		__submit_request(req, false);
		goto out_unlock_osdc;
	}

	if (m.num_ops != req->r_num_ops) {
		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
		       req->r_num_ops, req->r_tid);
		goto fail_request;
	}
	for (i = 0; i < req->r_num_ops; i++) {
		dout(" req %p tid %llu op %d rval %d len %u\n", req,
		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
		req->r_ops[i].rval = m.rval[i];
		req->r_ops[i].outdata_len = m.outdata_len[i];
		data_len += m.outdata_len[i];
	}
	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
		goto fail_request;
	}
	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
	     req, req->r_tid, m.result, data_len);

	/*
	 * Since we only ever request ONDISK, we should only ever get
	 * one (type of) reply back.
	 */
	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
	req->r_result = m.result ?: data_len;
	finish_request(req);
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);

	__complete_request(req);
	return;

fail_request:
	complete_request(req, -EIO);
out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
}

static void set_pool_was_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		pi->was_full = __pool_full(pi);
	}
}

static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return pi->was_full && !__pool_full(pi);
}

static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	enum calc_target_result ct_res;

	ct_res = calc_target(osdc, &lreq->t, NULL, true);
	if (ct_res == CALC_TARGET_NEED_RESEND) {
		struct ceph_osd *osd;

		osd = lookup_create_osd(osdc, lreq->t.osd, true);
		if (osd != lreq->osd) {
			unlink_linger(lreq->osd, lreq);
			link_linger(osd, lreq);
		}
	}

	return ct_res;
}

/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
			  bool force_resend,
			  bool cleared_full,
			  bool check_pool_cleared_full,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;
	bool force_resend_writes;

	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* recalc_linger_target() */

		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
		     lreq->linger_id);
		ct_res = recalc_linger_target(lreq);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
			if (!force_resend && !force_resend_writes)
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_linger_map_check(lreq);
			/*
			 * scan_requests() for the previous epoch(s)
			 * may have already added it to the list, since
			 * it's not unlinked here.
			 */
			if (list_empty(&lreq->scan_item))
				list_add_tail(&lreq->scan_item, need_resend_linger);
			break;
		case CALC_TARGET_POOL_DNE:
			list_del_init(&lreq->scan_item);
			check_linger_pool_dne(lreq);
			break;
		}
	}

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* unlink_request(), check_pool_dne() */

		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
		ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
				     false);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
			if (!force_resend &&
			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
			     !force_resend_writes))
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_map_check(req);
			unlink_request(osd, req);
			insert_request(need_resend, req);
			break;
		case CALC_TARGET_POOL_DNE:
			check_pool_dne(req);
			break;
		}
	}
}

static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}

static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	enum calc_target_result ct_res;
	struct rb_node *n;

	/* make sure need_resend targets reflect latest map */
	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);

		if (req->r_t.epoch < osdc->osdmap->epoch) {
			ct_res = calc_target(osdc, &req->r_t, NULL, false);
			if (ct_res == CALC_TARGET_POOL_DNE) {
				erase_request(need_resend, req);
				check_pool_dne(req);
			}
		}
	}

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */

		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr ||
	    osdc->osdmap->epoch < osdc->epoch_barrier)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_osdc_abort_on_full(osdc);
	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}

/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	clear_backoffs(osd);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}

struct MOSDBackoff {
	struct ceph_spg spgid;
	u32 map_epoch;
	u8 op;
	u64 id;
	struct ceph_hobject_id *begin;
	struct ceph_hobject_id *end;
};

static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
	if (ret)
		return ret;
	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
	if (ret)
		return ret;
	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
	ceph_decode_8_safe(&p, end, m->op, e_inval);
	ceph_decode_64_safe(&p, end, m->id, e_inval);

	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
	if (!m->begin)
		return -ENOMEM;

	ret = decode_hoid(&p, end, m->begin);
	if (ret) {
		free_hoid(m->begin);
		return ret;
	}

	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
	if (!m->end) {
		free_hoid(m->begin);
		return -ENOMEM;
	}

	ret = decode_hoid(&p, end, m->end);
	if (ret) {
		free_hoid(m->begin);
		free_hoid(m->end);
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_msg *create_backoff_message(
				const struct ceph_osd_backoff *backoff,
				u32 map_epoch)
{
	struct ceph_msg *msg;
	void *p, *end;
	int msg_size;

	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->begin);
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->end);

	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
	if (!msg)
		return NULL;

	p = msg->front.iov_base;
	end = p + msg->front_alloc_len;

	encode_spgid(&p, &backoff->spgid);
	ceph_encode_32(&p, map_epoch);
	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
	ceph_encode_64(&p, backoff->id);
	encode_hoid(&p, end, backoff->begin);
	encode_hoid(&p, end, backoff->end);
	BUG_ON(p != end);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}

static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_msg *msg;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
	if (!spg) {
		spg = alloc_spg_mapping();
		if (!spg) {
			pr_err("%s failed to allocate spg\n", __func__);
			return;
		}
		spg->spgid = m->spgid; /* struct */
		insert_spg_mapping(&osd->o_backoff_mappings, spg);
	}

	backoff = alloc_backoff();
	if (!backoff) {
		pr_err("%s failed to allocate backoff\n", __func__);
		return;
	}
	backoff->spgid = m->spgid; /* struct */
	backoff->id = m->id;
	backoff->begin = m->begin;
	m->begin = NULL; /* backoff now owns this */
	backoff->end = m->end;
	m->end = NULL;   /* ditto */

	insert_backoff(&spg->backoffs, backoff);
	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);

	/*
	 * Ack with original backoff's epoch so that the OSD can
	 * discard this if there was a PG split.
	 */
	msg = create_backoff_message(backoff, m->map_epoch);
	if (!msg) {
		pr_err("%s failed to allocate msg\n", __func__);
		return;
	}
	ceph_con_send(&osd->o_con, msg);
}

static bool target_contained_by(const struct ceph_osd_request_target *t,
				const struct ceph_hobject_id *begin,
				const struct ceph_hobject_id *end)
{
	struct ceph_hobject_id hoid;
	int cmp;

	hoid_fill_from_target(&hoid, t);
	cmp = hoid_compare(&hoid, begin);
	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
}

static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct rb_node *n;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
	if (!backoff) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}

	if (hoid_compare(backoff->begin, m->begin) &&
	    hoid_compare(backoff->end, m->end)) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		/* unblock it anyway... */
	}

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
	BUG_ON(!spg);

	erase_backoff(&spg->backoffs, backoff);
	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
	free_backoff(backoff);

	if (RB_EMPTY_ROOT(&spg->backoffs)) {
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}

	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
			/*
			 * Match against @m, not @backoff -- the PG may
			 * have split on the OSD.
			 */
			if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}

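/*
 * An OSD sends MOSDBackoff to tell us to stop sending requests for a
 * given hobject range (block) until it unblocks that range.  Blocks
 * are acked so the OSD can discard stale backoffs after PG splits;
 * plugged requests are resent on unblock.
 */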
static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct MOSDBackoff m;
	int ret;

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}

/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8;	/* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    msg->num_data_items ? &msg->data[0] : NULL;

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
				} else {
					ceph_release_page_vector(data->pages,
					       calc_pages_for(0, data->length));
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);
/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result;	/* completed */
	}

	return left;
}
/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
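/*
 * Example (illustrative sketch, not part of the original file): the
 * submit-and-wait pattern that the synchronous helpers in this file
 * are built on.  The function name is hypothetical; the caller is
 * assumed to hold a ref on @req and to put it afterwards.
 */
static int __maybe_unused example_submit_and_wait(struct ceph_osd_client *osdc,
						  struct ceph_osd_request *req)
{
	int ret;

	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req);	/* ->r_result */
	return ret;
}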
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
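/*
 * Added usage note (not in the original source): a caller flushing
 * dirty data would issue its writes and then call
 *
 *	ceph_osdc_sync(osdc);
 *
 * which returns only once every write that was in flight at the time
 * of the call has completed.
 */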
static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	return req;
}

static struct ceph_osd_request *
alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
{
	struct ceph_osd_request *req;

	req = alloc_linger_request(lreq);
	if (!req)
		return NULL;

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	osd_req_op_watch_init(req, 0, 0, watch_opcode);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}
/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&lreq->mtime);

	lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
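/*
 * Example (illustrative sketch, not part of the original file): a
 * minimal watch lifecycle.  example_watch() and its parameters are
 * hypothetical; a real caller would keep the returned handle around
 * and re-establish state from @errcb on watch errors.
 */
static int __maybe_unused example_watch(struct ceph_osd_client *osdc,
					struct ceph_object_id *oid,
					struct ceph_object_locator *oloc,
					rados_watchcb2_t wcb,
					rados_watcherrcb_t errcb,
					void *data)
{
	struct ceph_osd_linger_request *handle;

	handle = ceph_osdc_watch(osdc, oid, oloc, wcb, errcb, data);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... notifies on the object are now delivered to wcb() ... */

	return ceph_osdc_unwatch(osdc, handle);	/* drops the handle's ref */
}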
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);
static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 u32 payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_32(pl, 1);	/* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     u32 payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
				     payload, payload_len);
	if (ret)
		goto out_put_lreq;

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
	if (ret)
		goto out_put_lreq;

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
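/*
 * Example (illustrative sketch, not part of the original file):
 * sending a payload-less notify and releasing the reply exactly as
 * the comment above prescribes.  example_notify() is hypothetical;
 * the 10 second timeout is arbitrary.
 */
static int __maybe_unused example_notify(struct ceph_osd_client *osdc,
					 struct ceph_object_id *oid,
					 struct ceph_object_locator *oloc)
{
	struct page **reply_pages;
	size_t reply_len;
	int ret;

	ret = ceph_osdc_notify(osdc, oid, oloc, NULL, 0, 10,
			       &reply_pages, &reply_len);
	if (reply_pages)
		ceph_release_page_vector(reply_pages,
					 calc_pages_for(0, reply_len));
	return ret;
}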
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}
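/*
 * Added usage note (not in the original source): a watch owner can
 * poll this to decide whether its watch can still be trusted, e.g.
 *
 *	ret = ceph_osdc_watch_check(osdc, lreq);
 *	if (ret < 0)
 *		-- watch is invalid: unwatch and re-watch
 *	else
 *		-- watch was confirmed no more than ret ms ago
 */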
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_copy(p, &item->name, sizeof(item->name));
	item->cookie = ceph_decode_64(p);
	*p += 4;	/* skip timeout_seconds */
	if (struct_v >= 2) {
		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
		ceph_decode_addr(&item->addr);
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr.in_addr));
	return 0;
}
static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}
/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
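/*
 * Example (illustrative sketch, not part of the original file):
 * listing and printing the watchers of an object, then freeing the
 * array as required above.  example_dump_watchers() is hypothetical.
 */
static void __maybe_unused example_dump_watchers(struct ceph_osd_client *osdc,
					struct ceph_object_id *oid,
					struct ceph_object_locator *oloc)
{
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u32 i;

	if (ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
				    &num_watchers))
		return;

	for (i = 0; i < num_watchers; i++)
		dout("watcher %s%llu cookie %llu\n",
		     ENTITY_NAME(watchers[i].name), watchers[i].cookie);

	kfree(watchers);	/* caller owns the array */
}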
/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);
void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = osd_req_op_cls_init(req, 0, class, method);
	if (ret)
		goto out_put_req;

	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_page)
		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
						   *resp_len, 0, false, false);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_page)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
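/*
 * Example (illustrative sketch, not part of the original file):
 * invoking an OSD class method with no request payload and a
 * single-page reply.  The "rbd"/"get_size" pair is only a plausible
 * class/method name; example_call() is hypothetical.
 */
static int __maybe_unused example_call(struct ceph_osd_client *osdc,
				       struct ceph_object_id *oid,
				       struct ceph_object_locator *oloc)
{
	struct page *reply_page;
	size_t reply_len = PAGE_SIZE;
	int ret;

	reply_page = alloc_page(GFP_NOIO);
	if (!reply_page)
		return -ENOMEM;

	ret = ceph_osdc_call(osdc, oid, oloc, "rbd", "get_size",
			     CEPH_OSD_FLAG_READ, NULL, 0,
			     reply_page, &reply_len);
	/* on success, ret is the method's rval and reply_len is updated */
	__free_page(reply_page);
	return ret;
}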
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
	if (!osdc->completion_wq)
		goto out_notify_wq;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_notify_wq:
	destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	destroy_workqueue(osdc->completion_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec64 *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
				false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
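/*
 * Added usage note (not in the original source): the two helpers
 * above are the synchronous data path, e.g.
 *
 *	rc = ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
 *				  truncate_seq, truncate_size, &mtime,
 *				  pages, num_pages);
 *
 * returns len on success, or a negative error.
 */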
static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
				     u64 src_snapid, u64 src_version,
				     struct ceph_object_id *src_oid,
				     struct ceph_object_locator *src_oloc,
				     u32 src_fadvise_flags,
				     u32 dst_fadvise_flags,
				     u8 copy_from_flags)
{
	struct ceph_osd_req_op *op;
	struct page **pages;
	void *p, *end;

	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
	op->copy_from.snapid = src_snapid;
	op->copy_from.src_version = src_version;
	op->copy_from.flags = copy_from_flags;
	op->copy_from.src_fadvise_flags = src_fadvise_flags;

	p = page_address(pages[0]);
	end = p + PAGE_SIZE;
	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
	encode_oloc(&p, end, src_oloc);
	op->indata_len = PAGE_SIZE - (end - p);

	ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
				 op->indata_len, 0, false, true);
	return 0;
}
int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
			u64 src_snapid, u64 src_version,
			struct ceph_object_id *src_oid,
			struct ceph_object_locator *src_oloc,
			u32 src_fadvise_flags,
			struct ceph_object_id *dst_oid,
			struct ceph_object_locator *dst_oloc,
			u32 dst_fadvise_flags,
			u8 copy_from_flags)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
					src_oloc, src_fadvise_flags,
					dst_fadvise_flags, copy_from_flags);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_copy_from);
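/*
 * Example (illustrative sketch, not part of the original file): a
 * plain whole-object copy between two objects.  example_copy() is
 * hypothetical; src_version 0 skips the version check, and 0 for the
 * fadvise and copy_from flags requests default behaviour.
 */
static int __maybe_unused example_copy(struct ceph_osd_client *osdc,
				       struct ceph_object_id *src_oid,
				       struct ceph_object_locator *src_oloc,
				       struct ceph_object_id *dst_oid,
				       struct ceph_object_locator *dst_oloc)
{
	return ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0, src_oid, src_oloc,
				   0, dst_oid, dst_oloc, 0, 0);
}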
int __init ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}
/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}
/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}
/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
					    challenge_buf, challenge_buf_len);
}
static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}
static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.add_authorizer_challenge = add_authorizer_challenge,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.fault = osd_fault,
};