#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
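
/*
 * Worked example (illustrative only, assuming the common default layout
 * of stripe_unit = object_size = 4M with stripe_count = 1): a request
 * for off=6M, *plen=4M crosses the boundary of object 1, which covers
 * [4M, 8M), so calc_layout() yields objnum=1, objoff=2M, objlen=2M and
 * shortens *plen from 4M to 2M; the caller issues the remaining 2M as a
 * separate request.
 */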
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */
#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
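
/*
 * Example: osd_req_op_data(req, 0, extent, osd_data) evaluates to
 * &req->r_ops[0].extent.osd_data after bounds-checking the op index
 * against r_num_ops.  The statement expression lets one macro address
 * per-op data fields across the different op unions (extent, cls,
 * xattr, notify, ...).
 */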
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
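
/*
 * Usage sketch (hypothetical caller, error handling elided): attach a
 * freshly allocated page vector to receive the payload of a read op.
 * With own_pages=true the vector is released together with the request.
 *
 *	int num_pages = calc_pages_for(0, len);
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, true);
 */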
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}
static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
				    unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	default:
		break;
	}
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);
static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
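
/*
 * Typical call sequence (illustrative sketch only, error handling and
 * locking elided; "pool_id" and the object name are placeholders):
 *
 *	struct ceph_osd_request *req;
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	req->r_base_oloc.pool = pool_id;
 *	ceph_oid_printf(&req->r_base_oid, "%s", "an_object");
 *	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 *	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 *
 * The request is freed via ceph_osdc_put_request() once the last
 * reference is dropped.
 */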
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}
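
/*
 * __CEPH_FORALL_OSD_OPS (see include/linux/ceph/rados.h) applies
 * GENERATE_CASE to every opcode in the op table, so the switch above
 * expands to one "case CEPH_OSD_OP_<op>: return true;" line per known
 * opcode, e.g. "case CEPH_OSD_OP_READ: return true;".
 */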
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			 u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
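
/*
 * Usage sketch (hypothetical class/method names, error handling elided):
 * invoke an object class method and collect its output.
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "myclass", "mymethod");
 *	osd_req_op_cls_request_data_pagelist(req, 0, in_pagelist);
 *	osd_req_op_cls_response_data_pages(req, 0, reply_pages, PAGE_SIZE,
 *					   0, false, false);
 *
 * "myclass"/"mymethod", in_pagelist and reply_pages are placeholders,
 * not values defined by this file.
 */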
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif /* CONFIG_BLOCK */
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_abort_on_full = true;
	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
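
/*
 * Illustrative sketch (hypothetical caller): issue a file-relative read.
 * "len" may come back shortened if the extent crossed an object boundary
 * (see calc_layout() above); truncate_seq/truncate_size of 0 are
 * placeholder values here.
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
 *				    0, 1, CEPH_OSD_OP_READ,
 *				    CEPH_OSD_FLAG_READ, NULL, 0, 0, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 */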
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
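
/*
 * DEFINE_RB_FUNCS (include/linux/ceph/libceph.h) generates the static
 * lookup_request()/insert_request()/erase_request() helpers used
 * throughout this file, keyed by r_tid.  The _mc variant maintains the
 * map-check tree via r_mc_node, so a request can be linked into both
 * trees at once.
 */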
static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}
/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}
/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}
enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   struct ceph_connection *con,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	enum calc_target_result ct_res;
	int ret;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	ret = __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
					  &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting, any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;

		t->osd = acting.primary;
	}

	if (unpaused || legacy_change || force_resend ||
	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
					       RESEND_ON_SPLIT)))
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}
static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}
/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)
static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}
static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}
static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}
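
/*
 * Resulting order (informal summary): hoids sort by (is_max, pool,
 * reversed hash bits, nspace, key-or-oid, oid, snapid).  This matches
 * the OSD's bitwise sort order, which is what makes the [begin, end)
 * backoff range checks in lookup_containing_backoff() meaningful.
 */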
/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}
static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}
static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}
/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *
lookup_containing_backoff(struct rb_root *root,
			  const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}
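
/*
 * Illustrative walk: ranges in a given tree are disjoint and ordered by
 * ->begin, so each step either descends left (hoid < begin), reports
 * containment (begin <= hoid < end), or descends right -- an O(log n)
 * stabbing query over non-overlapping [begin, end) intervals.
 */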
/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)

static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}
/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}
static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}
static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}
static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
	ceph_encode_8(p, 1);
	ceph_encode_64(p, pgid->pool);
	ceph_encode_32(p, pgid->seed);
	ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
	encode_pgid(p, &spgid->pgid);
	ceph_encode_8(p, spgid->shard);
}

static void encode_oloc(void **p, void *end,
			const struct ceph_object_locator *oloc)
{
	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
	ceph_encode_64(p, oloc->pool);
	ceph_encode_32(p, -1); /* preferred */
	ceph_encode_32(p, 0);  /* key len */
	if (oloc->pool_ns)
		ceph_encode_string(p, end, oloc->pool_ns->str,
				   oloc->pool_ns->len);
	else
		ceph_encode_32(p, 0);
}
static void encode_request_partial(struct ceph_osd_request *req,
				   struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);

	/* reqid */
	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
	memset(p, 0, sizeof(struct ceph_osd_reqid));
	p += sizeof(struct ceph_osd_reqid);

	/* trace */
	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
	p += sizeof(struct ceph_blkin_trace_info);

	ceph_encode_32(&p, 0); /* client_inc, always 0 */
	ceph_encode_timespec(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);

	encode_oloc(&p, end, &req->r_t.target_oloc);
	ceph_encode_string(&p, end, req->r_t.target_oid.name,
			   req->r_t.target_oid.name_len);

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
	BUG_ON(p != end - 8); /* space for features */

	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
	/* front_len is finalized in encode_request_finish() */

	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
static void encode_request_finish(struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;

	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
		/* luminous OSD -- encode features and be done */
		p = end - 8;
		ceph_encode_64(&p, msg->con->peer_features);
	} else {
		struct {
			char spgid[CEPH_ENCODING_START_BLK_LEN +
				   CEPH_PGID_ENCODING_LEN + 1];
			__le32 hash;
			__le32 epoch;
			__le32 flags;
			char reqid[CEPH_ENCODING_START_BLK_LEN +
				   sizeof(struct ceph_osd_reqid)];
			char trace[sizeof(struct ceph_blkin_trace_info)];
			__le32 client_inc;
			struct ceph_timespec mtime;
		} __packed head;
		struct ceph_pg pgid;
		void *oloc, *oid, *tail;
		int oloc_len, oid_len, tail_len;
		int len;

		/*
		 * Pre-luminous OSD -- reencode v8 into v4 using @head
		 * as a temporary buffer.  Encode the raw PG; the rest
		 * is just a matter of moving oloc, oid and tail blobs
		 * around.
		 */
		memcpy(&head, p, sizeof(head));
		p += sizeof(head);

		oloc = p;
		p += CEPH_ENCODING_START_BLK_LEN;
		pgid.pool = ceph_decode_64(&p);
		p += 4 + 4; /* preferred, key len */
		len = ceph_decode_32(&p);
		p += len;   /* nspace */
		oloc_len = p - oloc;

		oid = p;
		len = ceph_decode_32(&p);
		p += len;
		oid_len = p - oid;

		tail = p;
		tail_len = (end - p) - 8;

		p = msg->front.iov_base;
		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));

		/* reassert_version */
		memset(p, 0, sizeof(struct ceph_eversion));
		p += sizeof(struct ceph_eversion);

		BUG_ON(p >= oloc);
		memmove(p, oloc, oloc_len);
		p += oloc_len;

		pgid.seed = le32_to_cpu(head.hash);
		encode_pgid(&p, &pgid); /* raw pg */

		BUG_ON(p >= oid);
		memmove(p, oid, oid_len);
		p += oid_len;

		/* tail -- ops, snapid, snapc, retry_attempt */
		BUG_ON(p >= tail);
		memmove(p, tail, tail_len);
		p += tail_len;

		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
	     le16_to_cpu(msg->hdr.version));
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/* backoff? */
	if (should_plug_request(req))
		return;

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request_partial(req, req->r_request);

	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
	     req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}
static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}
static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);
static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	enum calc_target_result ct_res;
	bool need_send = false;
	bool promoted = false;
	bool need_abort = false;

	WARN_ON(req->r_tid);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	ct_res = calc_target(osdc, &req->r_t, NULL, false);
	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
		goto promote;

	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if (osdc->osdmap->epoch < osdc->epoch_barrier) {
		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
		     osdc->epoch_barrier);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		pr_warn_ratelimited("FULL or reached pool quota\n");
		req->r_t.paused = true;
		maybe_request_map(osdc);
		if (req->r_abort_on_full)
			need_abort = true;
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(osd, req);
	if (need_send)
		send_request(req);
	else if (need_abort)
		complete_request(req, -ENOSPC);
	mutex_unlock(&osd->lock);

	if (ct_res == CALC_TARGET_POOL_DNE)
		send_map_check(req);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}
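/*
 * Illustrative sketch (not part of the original file): the read-to-write
 * lock promotion used by __submit_request() above.  An rw_semaphore
 * cannot be upgraded atomically, so the read lock is dropped first and
 * everything computed under it must be redone once the write lock is
 * held (hence the "goto again" style retry).
 */
#if 0
	up_read(&osdc->lock);
	down_write(&osdc->lock);	/* another writer may run in between */
	/* recompute the target and retry the submission here */
#endif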
static void account_request(struct ceph_osd_request *req)
{
	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));

	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
	atomic_inc(&req->r_osdc->num_requests);

	req->r_start_stamp = jiffies;
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}

static void finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	if (req->r_osd)
		unlink_request(req->r_osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}

static void __complete_request(struct ceph_osd_request *req)
{
	if (req->r_callback) {
		dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
		     req->r_tid, req->r_callback, req->r_result);
		req->r_callback(req);
	}
}

/*
 * This is open-coded in handle_reply().
 */
static void complete_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	req->r_result = err;
	finish_request(req);
	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}
static void cancel_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
	erase_request_mc(&osdc->map_checks, req);
	ceph_osdc_put_request(req);
}

static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	cancel_map_check(req);
	finish_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}

static void abort_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	cancel_map_check(req);
	complete_request(req, err);
}

static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	if (likely(eb > osdc->epoch_barrier)) {
		dout("updating epoch_barrier from %u to %u\n",
		     osdc->epoch_barrier, eb);
		osdc->epoch_barrier = eb;
		/* Request map if we're not to the barrier yet */
		if (eb > osdc->osdmap->epoch)
			maybe_request_map(osdc);
	}
}
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	down_read(&osdc->lock);
	if (unlikely(eb > osdc->epoch_barrier)) {
		up_read(&osdc->lock);
		down_write(&osdc->lock);
		update_epoch_barrier(osdc, eb);
		up_write(&osdc->lock);
	} else {
		up_read(&osdc->lock);
	}
}
EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
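/*
 * Illustrative sketch (not part of the original file): a caller that
 * learns out-of-band (e.g. from a server reply) that map epochs below
 * "eb" are stale could propagate that knowledge here; "eb" is a
 * hypothetical variable.
 */
#if 0
	ceph_osdc_update_epoch_barrier(&client->osdc, eb);
#endif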
/*
 * Drop all pending requests that are stalled waiting on a full condition to
 * clear, and complete them with ENOSPC as the return code.  Set the
 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
 * cancelled.
 */
static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *m;
	bool victims = false;

	dout("enter abort_on_full\n");

	if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
		goto out;

	/* Scan list and see if there is anything to abort */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);
			m = rb_next(m);

			if (req->r_abort_on_full) {
				victims = true;
				break;
			}
		}
		if (victims)
			break;
	}

	if (!victims)
		goto out;

	/*
	 * Update the barrier to current epoch if it's behind that point,
	 * since we know we have some calls to be aborted in the tree.
	 */
	update_epoch_barrier(osdc, osdc->osdmap->epoch);

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);
			m = rb_next(m);

			if (req->r_abort_on_full &&
			    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
			     pool_full(osdc, req->r_t.target_oloc.pool)))
				abort_request(req, -ENOSPC);
		}
	}
out:
	dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
}
static void check_pool_dne(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (req->r_attempts) {
		/*
		 * We sent a request earlier, which means that
		 * previously the pool existed, and now it does not
		 * (i.e., it was deleted).
		 */
		req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
	}

	if (req->r_map_dne_bound) {
		if (map->epoch >= req->r_map_dne_bound) {
			/* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}

static void map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_request *req;
	u64 tid = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}

	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
	if (!req->r_map_dne_bound)
		req->r_map_dne_bound = greq->u.newest;
	erase_request_mc(&osdc->map_checks, req);
	check_pool_dne(req);

	ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}

static void send_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
	WARN_ON(ret);
}
/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}

static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	RB_CLEAR_NODE(&lreq->mc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	INIT_LIST_HEAD(&lreq->pending_lworks);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}

DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
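/*
 * Illustrative note (not part of the original file): each DEFINE_RB_*
 * line above expands into insert_/erase_ (and, for DEFINE_RB_FUNCS,
 * lookup_) helpers keyed by linger_id.  The linger_osdc instantiation,
 * for example, is used later in this file roughly as:
 */
#if 0
	insert_linger_osdc(&osdc->linger_requests, lreq);
	lreq = lookup_linger_osdc(&osdc->linger_requests, linger_id);
	erase_linger_osdc(&osdc->linger_requests, lreq);
#endif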
/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}

static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);

	erase_linger_osdc(&osdc->linger_requests, lreq);
	linger_put(lreq);
}

static void cancel_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}
struct linger_work {
	struct work_struct work;
	struct ceph_osd_linger_request *lreq;
	struct list_head pending_item;
	unsigned long queued_stamp;

	union {
		struct {
			u64 notify_id;
			u64 notifier_id;
			void *payload; /* points into @msg front */
			size_t payload_len;

			struct ceph_msg *msg; /* for ceph_msg_put() */
		} notify;
		struct {
			int err;
		} error;
	};
};

static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
				       work_func_t workfn)
{
	struct linger_work *lwork;

	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
	if (!lwork)
		return NULL;

	INIT_WORK(&lwork->work, workfn);
	INIT_LIST_HEAD(&lwork->pending_item);
	lwork->lreq = linger_get(lreq);

	return lwork;
}

static void lwork_free(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	mutex_lock(&lreq->lock);
	list_del(&lwork->pending_item);
	mutex_unlock(&lreq->lock);

	linger_put(lreq);
	kfree(lwork);
}

static void lwork_queue(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_lreq_locked(lreq);
	WARN_ON(!list_empty(&lwork->pending_item));

	lwork->queued_stamp = jiffies;
	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
	queue_work(osdc->notify_wq, &lwork->work);
}
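/*
 * Illustrative sketch (not part of the original file): the linger_work
 * life cycle implemented by the helpers above.  "workfn" is one of
 * do_watch_notify()/do_watch_error() defined below.
 */
#if 0
	lwork = lwork_alloc(lreq, workfn);	/* takes a ref on lreq */
	if (lwork) {
		/* fill in lwork->notify or lwork->error here */
		lwork_queue(lwork);		/* runs workfn on notify_wq */
	}
	/* workfn must end with lwork_free(lwork), dropping the lreq ref */
#endif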
static void do_watch_notify(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	WARN_ON(!lreq->is_watch);
	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
	     lwork->notify.payload_len);
	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
		  lwork->notify.notifier_id, lwork->notify.payload,
		  lwork->notify.payload_len);

out:
	ceph_msg_put(lwork->notify.msg);
	lwork_free(lwork);
}

static void do_watch_error(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);

out:
	lwork_free(lwork);
}

static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{
	struct linger_work *lwork;

	lwork = lwork_alloc(lreq, do_watch_error);
	if (!lwork) {
		pr_err("failed to allocate error-lwork\n");
		return;
	}

	lwork->error.err = lreq->last_error;
	lwork_queue(lwork);
}

static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
				       int result)
{
	if (!completion_done(&lreq->reg_commit_wait)) {
		lreq->reg_commit_error = (result <= 0 ? result : 0);
		complete_all(&lreq->reg_commit_wait);
	}
}
static void linger_commit_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
	     lreq->linger_id, req->r_result);
	linger_reg_commit_complete(lreq, req->r_result);
	lreq->committed = true;

	if (!lreq->is_watch) {
		struct ceph_osd_data *osd_data =
		    osd_req_op_data(req, 0, notify, response_data);
		void *p = page_address(osd_data->pages[0]);

		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);

		/* make note of the notify_id */
		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
			lreq->notify_id = ceph_decode_64(&p);
			dout("lreq %p notify_id %llu\n", lreq,
			     lreq->notify_id);
		} else {
			dout("lreq %p no notify_id\n", lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}

static void linger_reconnect_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
	if (req->r_result < 0) {
		if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
static void send_linger(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req = lreq->reg_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	verify_osdc_wrlocked(req->r_osdc);
	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = lreq->t.flags;
	req->r_mtime = lreq->mtime;

	mutex_lock(&lreq->lock);
	if (lreq->is_watch && lreq->committed) {
		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
			op->watch.cookie != lreq->linger_id);
		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
		op->watch.gen = ++lreq->register_gen;
		dout("lreq %p reconnect register_gen %u\n", lreq,
		     op->watch.gen);
		req->r_callback = linger_reconnect_cb;
	} else {
		if (!lreq->is_watch)
			lreq->notify_id = 0;
		else
			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
		dout("lreq %p register\n", lreq);
		req->r_callback = linger_commit_cb;
	}
	mutex_unlock(&lreq->lock);

	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	submit_request(req, true);
}
static void linger_ping_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
	     lreq->last_error);
	if (lreq->register_gen == req->r_ops[0].watch.gen) {
		if (!req->r_result) {
			lreq->watch_valid_thru = lreq->ping_sent;
		} else if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	} else {
		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
		     lreq->register_gen, req->r_ops[0].watch.gen);
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_request *req = lreq->ping_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("%s PAUSERD\n", __func__);
		return;
	}

	lreq->ping_sent = jiffies;
	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
	     lreq->register_gen);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	target_copy(&req->r_t, &lreq->t);

	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
		op->watch.cookie != lreq->linger_id ||
		op->watch.op != CEPH_OSD_WATCH_OP_PING);
	op->watch.gen = lreq->register_gen;
	req->r_callback = linger_ping_cb;
	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	ceph_osdc_get_request(req);
	account_request(req);
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(lreq->osd, req);
	send_request(req);
}
static void linger_submit(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd *osd;

	calc_target(osdc, &lreq->t, NULL, false);
	osd = lookup_create_osd(osdc, lreq->t.osd, true);
	link_linger(osd, lreq);

	send_linger(lreq);
}

static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (!lookup_lreq)
		return;

	WARN_ON(lookup_lreq != lreq);
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	linger_put(lreq);
}

/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
	if (lreq->is_watch && lreq->ping_req->r_osd)
		cancel_linger_request(lreq->ping_req);
	if (lreq->reg_req->r_osd)
		cancel_linger_request(lreq->reg_req);
	cancel_linger_map_check(lreq);
	unlink_linger(lreq->osd, lreq);
	linger_unregister(lreq);
}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	down_write(&osdc->lock);
	if (__linger_registered(lreq))
		__linger_cancel(lreq);
	up_write(&osdc->lock);
}
static void send_linger_map_check(struct ceph_osd_linger_request *lreq);

static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (lreq->register_gen) {
		lreq->map_dne_bound = map->epoch;
		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
		     lreq, lreq->linger_id);
	} else {
		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
		     map->epoch);
	}

	if (lreq->map_dne_bound) {
		if (map->epoch >= lreq->map_dne_bound) {
			/* we had a new enough map */
			pr_info("linger_id %llu pool does not exist\n",
				lreq->linger_id);
			linger_reg_commit_complete(lreq, -ENOENT);
			__linger_cancel(lreq);
		}
	} else {
		send_linger_map_check(lreq);
	}
}

static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_linger_request *lreq;
	u64 linger_id = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
	if (!lreq) {
		dout("%s linger_id %llu dne\n", __func__, linger_id);
		goto out_unlock;
	}

	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
	     greq->u.newest);
	if (!lreq->map_dne_bound)
		lreq->map_dne_bound = greq->u.newest;
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	check_linger_pool_dne(lreq);

	linger_put(lreq);
out_unlock:
	up_write(&osdc->lock);
}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (lookup_lreq) {
		WARN_ON(lookup_lreq != lreq);
		return;
	}

	linger_get(lreq);
	insert_linger_mc(&osdc->linger_map_checks, lreq);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  linger_map_check_cb, lreq->linger_id);
	WARN_ON(ret);
}

static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
	return ret ?: lreq->reg_commit_error;
}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
	return ret ?: lreq->notify_finish_error;
}
/*
 * Timeout callback, called every N seconds.  When 1 or more OSD
 * requests has been active for more than N seconds, we send a keepalive
 * (tag + timestamp) to its OSD to ensure any communications channel
 * reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_options *opts = osdc->client->options;
	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
	LIST_HEAD(slow_osds);
	struct rb_node *n, *p;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		bool found = false;

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_stamp, cutoff)) {
				dout(" req %p tid %llu on osd%d is laggy\n",
				     req, req->r_tid, osd->o_osd);
				found = true;
			}
			if (opts->osd_request_timeout &&
			    time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
						   req->r_tid, osd->o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
			struct ceph_osd_linger_request *lreq =
			    rb_entry(p, struct ceph_osd_linger_request, node);

			dout(" lreq %p linger_id %llu is served by osd%d\n",
			     lreq, lreq->linger_id, osd->o_osd);
			found = true;

			mutex_lock(&lreq->lock);
			if (lreq->is_watch && lreq->committed && !lreq->last_error)
				send_linger_ping(lreq);
			mutex_unlock(&lreq->lock);
		}

		if (found)
			list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}

	if (opts->osd_request_timeout) {
		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
						   req->r_tid,
						   osdc->homeless_osd.o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
	}

	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
		maybe_request_map(osdc);

	while (!list_empty(&slow_osds)) {
		struct ceph_osd *osd = list_first_entry(&slow_osds,
							struct ceph_osd,
							o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
	struct ceph_osd *osd, *nosd;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;

		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
		close_osd(osd);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}
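/*
 * Illustrative sketch (not part of the original file): both delayed
 * works above re-arm themselves, so they only need priming once.  The
 * initial arming is believed to happen at client init, roughly:
 */
#if 0
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(osdc->client->options->osd_idle_ttl));
#endif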
static int ceph_oloc_decode(void **p, void *end,
			    struct ceph_object_locator *oloc)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret = 0;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_v < 3) {
		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	if (struct_cv > 6) {
		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	oloc->pool = ceph_decode_64(p);
	*p += 4; /* skip preferred */

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_object_locator::key is set\n");
		goto e_inval;
	}

	if (struct_v >= 5) {
		bool changed = false;

		len = ceph_decode_32(p);
		if (len > 0) {
			ceph_decode_need(p, end, len, e_inval);
			if (!oloc->pool_ns ||
			    ceph_compare_string(oloc->pool_ns, *p, len))
				changed = true;
			*p += len;
		} else {
			if (oloc->pool_ns)
				changed = true;
		}
		if (changed) {
			/* redirect changes namespace */
			pr_warn("ceph_object_locator::nspace is changed\n");
			goto e_inval;
		}
	}

	if (struct_v >= 6) {
		s64 hash = ceph_decode_64(p);
		if (hash != -1) {
			pr_warn("ceph_object_locator::hash is set\n");
			goto e_inval;
		}
	}

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

static int ceph_redirect_decode(void **p, void *end,
				struct ceph_request_redirect *redir)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_cv > 1) {
		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	ret = ceph_oloc_decode(p, end, &redir->oloc);
	if (ret)
		goto out;

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_request_redirect::object_name is set\n");
		goto e_inval;
	}

	len = ceph_decode_32(p);
	*p += len; /* skip osd_instructions */

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
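/*
 * Illustrative sketch (not part of the original file): the bounds-checked
 * decode pattern used by the two functions above.  ceph_decode_need()
 * jumps to the given label when fewer than the requested bytes remain
 * between *p and end; afterwards the plain ceph_decode_*() accessors can
 * be used without further checks.
 */
#if 0
static int example_decode_u32(void **p, void *end, u32 *pval)
{
	ceph_decode_need(p, end, sizeof(u32), e_inval);	/* bounds check */
	*pval = ceph_decode_32(p);			/* advances *p by 4 */
	return 0;

e_inval:
	return -EINVAL;
}
#endif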
struct MOSDOpReply {
	struct ceph_pg pgid;
	u64 flags;
	int result;
	u32 epoch;
	int num_ops;
	u32 outdata_len[CEPH_OSD_MAX_OPS];
	s32 rval[CEPH_OSD_MAX_OPS];
	int retry_attempt;
	struct ceph_eversion replay_version;
	u64 user_version;
	struct ceph_request_redirect redirect;
};

static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u16 version = le16_to_cpu(msg->hdr.version);
	struct ceph_eversion bad_replay_version;
	u8 decode_redir;
	u32 len;
	int ret;
	int i;

	ceph_decode_32_safe(&p, end, len, e_inval);
	ceph_decode_need(&p, end, len, e_inval);
	p += len; /* skip oid */

	ret = ceph_decode_pgid(&p, end, &m->pgid);
	if (ret)
		return ret;

	ceph_decode_64_safe(&p, end, m->flags, e_inval);
	ceph_decode_32_safe(&p, end, m->result, e_inval);
	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
	p += sizeof(bad_replay_version);
	ceph_decode_32_safe(&p, end, m->epoch, e_inval);

	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
		goto e_inval;

	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
			 e_inval);
	for (i = 0; i < m->num_ops; i++) {
		struct ceph_osd_op *op = p;

		m->outdata_len[i] = le32_to_cpu(op->payload_len);
		p += sizeof(*op);
	}

	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
	for (i = 0; i < m->num_ops; i++)
		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);

	if (version >= 5) {
		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
		memcpy(&m->replay_version, p, sizeof(m->replay_version));
		p += sizeof(m->replay_version);
		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
	} else {
		m->replay_version = bad_replay_version; /* struct */
		m->user_version = le64_to_cpu(m->replay_version.version);
	}

	if (version >= 6) {
		if (version >= 7)
			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
		else
			decode_redir = 1;
	} else {
		decode_redir = 0;
	}

	if (decode_redir) {
		ret = ceph_redirect_decode(&p, end, &m->redirect);
		if (ret)
			return ret;
	} else {
		ceph_oloc_init(&m->redirect.oloc);
	}

	return 0;

e_inval:
	return -EINVAL;
}
/*
 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
 * specified.
 */
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_osd_request *req;
	struct MOSDOpReply m;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 data_len = 0;
	int ret;
	int i;

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
		goto out_unlock_session;
	}

	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
	ret = decode_MOSDOpReply(msg, &m);
	m.redirect.oloc.pool_ns = NULL;
	if (ret) {
		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
		       req->r_tid, ret);
		ceph_msg_dump(msg);
		goto fail_request;
	}
	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
	     le64_to_cpu(m.replay_version.version), m.user_version);

	if (m.retry_attempt >= 0) {
		if (m.retry_attempt != req->r_attempts - 1) {
			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
			     req, req->r_tid, m.retry_attempt,
			     req->r_attempts - 1);
			goto out_unlock_session;
		}
	} else {
		WARN_ON(1); /* MOSDOpReply v4 is assumed */
	}

	if (!ceph_oloc_empty(&m.redirect.oloc)) {
		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
		     m.redirect.oloc.pool);
		unlink_request(osd, req);
		mutex_unlock(&osd->lock);

		/*
		 * Not ceph_oloc_copy() - changing pool_ns is not
		 * supported.
		 */
		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
		req->r_tid = 0;
		__submit_request(req, false);
		goto out_unlock_osdc;
	}

	if (m.num_ops != req->r_num_ops) {
		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
		       req->r_num_ops, req->r_tid);
		goto fail_request;
	}
	for (i = 0; i < req->r_num_ops; i++) {
		dout(" req %p tid %llu op %d rval %d len %u\n", req,
		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
		req->r_ops[i].rval = m.rval[i];
		req->r_ops[i].outdata_len = m.outdata_len[i];
		data_len += m.outdata_len[i];
	}
	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
		goto fail_request;
	}
	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
	     req, req->r_tid, m.result, data_len);

	/*
	 * Since we only ever request ONDISK, we should only ever get
	 * one (type of) reply back.
	 */
	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
	req->r_result = m.result ?: data_len;
	finish_request(req);
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);

	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
	return;

fail_request:
	complete_request(req, -EIO);
out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
}
static void set_pool_was_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		pi->was_full = __pool_full(pi);
	}
}

static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return pi->was_full && !__pool_full(pi);
}

static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	enum calc_target_result ct_res;

	ct_res = calc_target(osdc, &lreq->t, NULL, true);
	if (ct_res == CALC_TARGET_NEED_RESEND) {
		struct ceph_osd *osd;

		osd = lookup_create_osd(osdc, lreq->t.osd, true);
		if (osd != lreq->osd) {
			unlink_linger(lreq->osd, lreq);
			link_linger(osd, lreq);
		}
	}

	return ct_res;
}
/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
			  bool force_resend,
			  bool cleared_full,
			  bool check_pool_cleared_full,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;
	bool force_resend_writes;

	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* recalc_linger_target() */

		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
		     lreq->linger_id);
		ct_res = recalc_linger_target(lreq);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
			if (!force_resend && !force_resend_writes)
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_linger_map_check(lreq);
			/*
			 * scan_requests() for the previous epoch(s)
			 * may have already added it to the list, since
			 * it's not unlinked here.
			 */
			if (list_empty(&lreq->scan_item))
				list_add_tail(&lreq->scan_item, need_resend_linger);
			break;
		case CALC_TARGET_POOL_DNE:
			list_del_init(&lreq->scan_item);
			check_linger_pool_dne(lreq);
			break;
		}
	}

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* unlink_request(), check_pool_dne() */

		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
		ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
				     false);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
			if (!force_resend &&
			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
			     !force_resend_writes))
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_map_check(req);
			unlink_request(osd, req);
			insert_request(need_resend, req);
			break;
		case CALC_TARGET_POOL_DNE:
			check_pool_dne(req);
			break;
		}
	}
}
static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}
static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	enum calc_target_result ct_res;
	struct rb_node *n;

	/* make sure need_resend targets reflect latest map */
	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);

		if (req->r_t.epoch < osdc->osdmap->epoch) {
			ct_res = calc_target(osdc, &req->r_t, NULL, false);
			if (ct_res == CALC_TARGET_POOL_DNE) {
				erase_request(need_resend, req);
				check_pool_dne(req);
			}
		}
	}

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */

		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr ||
	    osdc->osdmap->epoch < osdc->epoch_barrier)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_osdc_abort_on_full(osdc);
	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}
/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	clear_backoffs(osd);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}
struct MOSDBackoff {
	struct ceph_spg spgid;
	u32 map_epoch;
	u8 op;
	u64 id;
	struct ceph_hobject_id *begin;
	struct ceph_hobject_id *end;
};

static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
	if (ret)
		return ret;
	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
	if (ret)
		return ret;

	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
	ceph_decode_8_safe(&p, end, m->op, e_inval);
	ceph_decode_64_safe(&p, end, m->id, e_inval);

	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
	if (!m->begin)
		return -ENOMEM;

	ret = decode_hoid(&p, end, m->begin);
	if (ret) {
		free_hoid(m->begin);
		return ret;
	}

	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
	if (!m->end) {
		free_hoid(m->begin);
		return -ENOMEM;
	}

	ret = decode_hoid(&p, end, m->end);
	if (ret) {
		free_hoid(m->begin);
		free_hoid(m->end);
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_msg *create_backoff_message(
				const struct ceph_osd_backoff *backoff,
				u32 map_epoch)
{
	struct ceph_msg *msg;
	void *p, *end;
	int msg_size;

	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->begin);
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->end);

	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
	if (!msg)
		return NULL;

	p = msg->front.iov_base;
	end = p + msg->front_alloc_len;

	encode_spgid(&p, &backoff->spgid);
	ceph_encode_32(&p, map_epoch);
	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
	ceph_encode_64(&p, backoff->id);
	encode_hoid(&p, end, backoff->begin);
	encode_hoid(&p, end, backoff->end);
	BUG_ON(p != end);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_msg *msg;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
	if (!spg) {
		spg = alloc_spg_mapping();
		if (!spg) {
			pr_err("%s failed to allocate spg\n", __func__);
			return;
		}
		spg->spgid = m->spgid; /* struct */
		insert_spg_mapping(&osd->o_backoff_mappings, spg);
	}

	backoff = alloc_backoff();
	if (!backoff) {
		pr_err("%s failed to allocate backoff\n", __func__);
		return;
	}
	backoff->spgid = m->spgid; /* struct */
	backoff->id = m->id;
	backoff->begin = m->begin;
	m->begin = NULL; /* backoff now owns this */
	backoff->end = m->end;
	m->end = NULL;   /* ditto */

	insert_backoff(&spg->backoffs, backoff);
	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);

	/*
	 * Ack with original backoff's epoch so that the OSD can
	 * discard this if there was a PG split.
	 */
	msg = create_backoff_message(backoff, m->map_epoch);
	if (!msg) {
		pr_err("%s failed to allocate msg\n", __func__);
		return;
	}
	ceph_con_send(&osd->o_con, msg);
}

static bool target_contained_by(const struct ceph_osd_request_target *t,
				const struct ceph_hobject_id *begin,
				const struct ceph_hobject_id *end)
{
	struct ceph_hobject_id hoid;
	int cmp;

	hoid_fill_from_target(&hoid, t);
	cmp = hoid_compare(&hoid, begin);
	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
}

static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct rb_node *n;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
	if (!backoff) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}

	if (hoid_compare(backoff->begin, m->begin) &&
	    hoid_compare(backoff->end, m->end)) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		/* unblock it anyway... */
	}

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
	BUG_ON(!spg);

	erase_backoff(&spg->backoffs, backoff);
	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
	free_backoff(backoff);

	if (RB_EMPTY_ROOT(&spg->backoffs)) {
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}

	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
			/*
			 * Match against @m, not @backoff -- the PG may
			 * have split on the OSD.
			 */
			if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}
static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct MOSDBackoff m;
	int ret;

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}
/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    list_first_entry_or_null(&msg->data,
						     struct ceph_msg_data,
						     links);

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
				} else {
					ceph_release_page_vector(data->pages,
					       calc_pages_for(0, data->length));
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
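/*
 * Illustrative sketch (not part of the original file): the canonical
 * submit-and-wait pattern built from the two exported helpers above.
 * Allocation and op setup of @req are the caller's business and are
 * elided here.
 */
#if 0
static int example_sync_op(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int ret;

	ceph_osdc_start_request(osdc, req, false);	/* takes its own ref */
	ret = ceph_osdc_wait_request(osdc, req);	/* r_result or error */
	ceph_osdc_put_request(req);			/* drop the caller's ref */
	return ret;
}
#endif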
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
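/*
 * Illustrative note (not part of the original file): a caller that needs
 * all of its writes on stable storage before proceeding issues them and
 * then calls ceph_osdc_sync(); writes submitted afterwards (tid greater
 * than the sampled last_tid) are deliberately not waited for.
 */
#if 0
	ceph_osdc_sync(&client->osdc);
#endif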
static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}
/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts(&lreq->mtime);

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_linger_request(lreq);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_WATCH);
	osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_PING);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
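
/*
 * Example (not from the original source): establishing and tearing down
 * a watch.  The callback names are hypothetical; their signatures follow
 * rados_watchcb2_t and rados_watcherrcb_t from <linux/ceph/osd_client.h>.
 */
static void __maybe_unused example_watchcb(void *arg, u64 notify_id,
					   u64 cookie, u64 notifier_id,
					   void *data, size_t data_len)
{
	/* invoked from the notify workqueue for each incoming notify */
}

static void __maybe_unused example_errcb(void *arg, u64 cookie, int err)
{
	/* the watch is no longer valid: re-establish it or clean up */
}

static int __maybe_unused example_watch_object(struct ceph_osd_client *osdc,
					       struct ceph_object_id *oid,
					       struct ceph_object_locator *oloc)
{
	struct ceph_osd_linger_request *handle;

	handle = ceph_osdc_watch(osdc, oid, oloc, example_watchcb,
				 example_errcb, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... notifies are now delivered to example_watchcb() ... */

	return ceph_osdc_unwatch(osdc, handle);	/* releases the watch ref */
}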
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);
static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 size_t payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
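
/*
 * Example (not from the original source): acknowledging a notify from a
 * watch callback so the notifier's ceph_osdc_notify() can complete.  A
 * NULL payload sends an empty acknowledgement.
 */
static void __maybe_unused example_ack_notify(struct ceph_osd_client *osdc,
					      struct ceph_object_id *oid,
					      struct ceph_object_locator *oloc,
					      u64 notify_id, u64 cookie)
{
	int ret = ceph_osdc_notify_ack(osdc, oid, oloc, notify_id, cookie,
				       NULL, 0);
	if (ret)
		pr_err("example: notify_ack failed: %d\n", ret);
}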
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     size_t payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
				     timeout, payload, payload_len);
	if (ret) {
		linger_unregister(lreq);
		up_write(&osdc->lock);
		ceph_release_page_vector(pages, 1);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
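
/*
 * Example (not from the original source): sending a notify and releasing
 * the reply page vector, per the ownership rule documented above.  A
 * minimal sketch assuming a 10 second timeout and no payload.
 */
static int __maybe_unused example_notify_object(struct ceph_osd_client *osdc,
						struct ceph_object_id *oid,
						struct ceph_object_locator *oloc)
{
	struct page **reply_pages;
	size_t reply_len;
	int ret;

	ret = ceph_osdc_notify(osdc, oid, oloc, NULL, 0, 10,
			       &reply_pages, &reply_len);
	/* reply_pages/reply_len are initialized on both success and error */
	if (reply_pages)
		ceph_release_page_vector(reply_pages,
					 calc_pages_for(0, reply_len));
	return ret;
}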
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}
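
/*
 * Example (not from the original source): polling a watch for liveness.
 * A positive return is a safe upper bound on the age of the last
 * confirmation in milliseconds; a negative return means the watch must
 * be torn down with ceph_osdc_unwatch().
 */
static bool __maybe_unused example_watch_is_valid(struct ceph_osd_client *osdc,
					struct ceph_osd_linger_request *handle)
{
	return ceph_osdc_watch_check(osdc, handle) >= 0;
}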
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_copy(p, &item->name, sizeof(item->name));
	item->cookie = ceph_decode_64(p);
	*p += 4; /* skip timeout_seconds */
	if (struct_v >= 2) {
		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
		ceph_decode_addr(&item->addr);
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr.in_addr));
	return 0;
}
static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}
/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
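
/*
 * Example (not from the original source): listing watchers and freeing
 * the array, per the ownership rule in the comment above.
 */
static void __maybe_unused example_dump_watchers(struct ceph_osd_client *osdc,
						 struct ceph_object_id *oid,
						 struct ceph_object_locator *oloc)
{
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u32 i;

	if (ceph_osdc_list_watchers(osdc, oid, oloc, &watchers, &num_watchers))
		return;

	for (i = 0; i < num_watchers; i++)
		dout("watcher %s%llu cookie %llu\n",
		     ENTITY_NAME(watchers[i].name), watchers[i].cookie);
	kfree(watchers);	/* caller owns the array on success */
}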
/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_page)
		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
						   *resp_len, 0, false, false);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_page)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
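
/*
 * Example (not from the original source): invoking an OSD class method
 * with no input and a single-page reply.  The "lock"/"get_info"
 * class/method pair is illustrative only; any registered class method
 * is called the same way.
 */
static int __maybe_unused example_cls_call(struct ceph_osd_client *osdc,
					   struct ceph_object_id *oid,
					   struct ceph_object_locator *oloc)
{
	struct page *reply_page;
	size_t reply_len = PAGE_SIZE;
	int ret;

	reply_page = alloc_page(GFP_NOIO);
	if (!reply_page)
		return -ENOMEM;

	ret = ceph_osdc_call(osdc, oid, oloc, "lock", "get_info",
			     CEPH_OSD_FLAG_READ, NULL, 0,
			     reply_page, &reply_len);
	if (ret >= 0)
		dout("class method replied with %zu bytes\n", reply_len);

	__free_page(reply_page);
	return ret;
}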
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, 10, true, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, 10, true, "osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
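
/*
 * Example (not from the original source): a synchronous object read with
 * no truncation state.  The read may come back short if it crosses an
 * object boundary; plen reports the trimmed extent.
 */
static int __maybe_unused example_sync_read(struct ceph_osd_client *osdc,
					    struct ceph_vino vino,
					    struct ceph_file_layout *layout,
					    struct page **pages, int num_pages,
					    u64 off, u64 len)
{
	u64 plen = len;

	/* returns the number of bytes read or a negative error */
	return ceph_osdc_readpages(osdc, vino, layout, off, &plen,
				   0, 0, pages, num_pages, 0);
}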
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
				false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
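
/*
 * Example (not from the original source): a synchronous write stamped
 * with the current time.  A minimal sketch; real callers in fs/ceph
 * carry their own snap context and truncation state.
 */
static int __maybe_unused example_sync_write(struct ceph_osd_client *osdc,
					     struct ceph_vino vino,
					     struct ceph_file_layout *layout,
					     struct ceph_snap_context *snapc,
					     struct page **pages, int num_pages,
					     u64 off, u64 len)
{
	struct timespec mtime;

	ktime_get_real_ts(&mtime);
	/* returns the number of bytes written or a negative error */
	return ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
				    0, 0, &mtime, pages, num_pages);
}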
int ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(ceph_osdc_setup);

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}
EXPORT_SYMBOL(ceph_osdc_cleanup);
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		return;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		return;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		return;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}
/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}
/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.fault = osd_fault,
};