#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512
static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
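/*
 * Worked example (illustrative, not from the original source): with a
 * default layout of 4M objects (object_size = stripe_unit = 4M,
 * stripe_count = 1), off=6M and *plen=4M map to *objnum=1, *objoff=2M,
 * and the extent is shortened to *plen = *objlen = 2M so it doesn't
 * cross into object 2; the caller issues the remainder separately.
 */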
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */
#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
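/*
 * For example (illustrative), osd_req_op_data(req, 0, extent, osd_data)
 * evaluates to &req->r_ops[0].extent.osd_data, with the BUG_ON() bounds
 * check on the op index folded into the statement expression.
 */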
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);
	WARN_ON(osd_req->r_ops[which].op != CEPH_OSD_OP_STAT);
	return &osd_req->r_ops[which].raw_data_in;
}
struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
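/*
 * Typical caller pattern (sketch only; "pages", "off" and "len" are
 * assumed to come from the caller):
 *
 *	pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 */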
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}
static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}
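/*
 * For instance (illustrative): with 4K pages, alignment=0x1000 and
 * length=0x2800 span calc_pages_for(0x1000, 0x2800) = 3 pages, so all
 * three are released above when own_pages is set.
 */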
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	default:
		break;
	}
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}
static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->osd = src->osd;
}
static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}
static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}
void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);
static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}
/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
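/*
 * Minimal usage sketch (illustrative; error handling elided and
 * "pool_id" is a placeholder): build a one-op STAT request by hand
 * instead of going through ceph_osdc_new_request():
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 *	ceph_oid_printf(&req->r_base_oid, "%s", "some_object");
 *	req->r_base_oloc.pool = pool_id;
 *	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 */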
static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
	msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 1 + 8 + 4 + 4; /* pgid */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4; /* retry_attempt */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
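/*
 * Worked example (illustrative): a one-op request with a 20-byte oid,
 * no pool namespace and no snap context needs
 * 12 + 20 + (CEPH_ENCODING_START_BLK_LEN + 20) + 17 + (4 + 20) +
 * (2 + sizeof(struct ceph_osd_op)) + 8 + 8 + 4 + 4 bytes of front
 * payload, before any data items.
 */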
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}
void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
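/*
 * Example (illustrative): if op[which] is a WRITE of {offset=0,
 * length=8192}, osd_req_op_extent_dup_last(req, which, 4096) makes
 * op[which+1] a WRITE of {offset=4096, length=4096} with indata_len
 * reduced to match; the caller then shrinks op[which] itself, e.g. via
 * osd_req_op_extent_update().
 */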
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			 u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
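/*
 * Sketch of the result (illustrative): a 4K WRITE at offset 0 encodes
 * as op = CEPH_OSD_OP_WRITE, extent.offset = 0, extent.length = 4096
 * and payload_len = 4096, all little-endian; the 4096 data bytes
 * travel outside the op array, as message data items.
 */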
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
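/*
 * Usage sketch (illustrative; "layout" and "vino" come from the caller,
 * error handling elided): read the first 4K of a file's data:
 *
 *	u64 len = 4096;
 *	req = ceph_osdc_new_request(osdc, layout, vino, 0, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, false);
 */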
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}
/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	atomic_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}
static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}
DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}
static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}
static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}
/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}
/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->base_oloc.pool);
	return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
	       (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
}
enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   u32 *last_force_resend,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool need_check_tiering = false;
	bool need_resend = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	enum calc_target_result ct_res;
	int ret;

	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (last_force_resend &&
		    *last_force_resend < pi->last_force_request_resend) {
			*last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (!last_force_resend) {
			force_resend = true;
		}
	}
	if (ceph_oid_empty(&t->target_oid) || force_resend) {
		ceph_oid_copy(&t->target_oid, &t->base_oid);
		need_check_tiering = true;
	}
	if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
		ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
		need_check_tiering = true;
	}

	if (need_check_tiering &&
	    (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;
	}

	ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
					&t->target_oloc, &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		need_resend = true;
	}

	if (ceph_pg_compare(&t->pgid, &pgid) ||
	    ceph_osds_changed(&t->acting, &acting, any_change) ||
	    force_resend) {
		t->pgid = pgid; /* struct */
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->osd = acting.primary;
		need_resend = true;
	}

	ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}
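/*
 * Example outcome (illustrative): if the osdmap maps the target PG to
 * acting set [3, 1, 7], t->osd becomes 3 (the primary); the caller sees
 * CALC_TARGET_NEED_RESEND only when the mapping actually changed, and
 * CALC_TARGET_POOL_DNE when the base pool is gone from the map.
 */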
static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}
static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	ceph_encode_32(&p, 1); /* client_inc, always 1 */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);
	ceph_encode_timespec(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);
	/* aka reassert_version */
	memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
	p += sizeof(req->r_replay_version);

	/* oloc */
	ceph_start_encoding(&p, 5, 4,
			    ceph_oloc_encoding_size(&req->r_t.target_oloc));
	ceph_encode_64(&p, req->r_t.target_oloc.pool);
	ceph_encode_32(&p, -1); /* preferred */
	ceph_encode_32(&p, 0); /* key len */
	if (req->r_t.target_oloc.pool_ns)
		ceph_encode_string(&p, end, req->r_t.target_oloc.pool_ns->str,
				   req->r_t.target_oloc.pool_ns->len);
	else
		ceph_encode_32(&p, 0);

	/* pgid */
	ceph_encode_8(&p, 1);
	ceph_encode_64(&p, req->r_t.pgid.pool);
	ceph_encode_32(&p, req->r_t.pgid.seed);
	ceph_encode_32(&p, -1); /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_t.target_oid.name_len);
	memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
	p += req->r_t.target_oid.name_len;

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
	     req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
	     msg->front.iov_len, data_len);
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request(req, req->r_request);

	dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.osd, req->r_flags, req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}
static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}
static void send_map_check(struct ceph_osd_request *req);

static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	enum calc_target_result ct_res;
	bool need_send = false;
	bool promoted = false;

	WARN_ON(req->r_tid);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	ct_res = calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
		goto promote;

	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		pr_warn_ratelimited("FULL or reached pool quota\n");
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(osd, req);
	if (need_send)
		send_request(req);
	mutex_unlock(&osd->lock);

	if (ct_res == CALC_TARGET_POOL_DNE)
		send_map_check(req);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}
static void account_request(struct ceph_osd_request *req)
{
	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));

	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
	atomic_inc(&req->r_osdc->num_requests);

	req->r_start_stamp = jiffies;
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}
static void finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	unlink_request(osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}
static void __complete_request(struct ceph_osd_request *req)
{
	if (req->r_callback) {
		dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
		     req->r_tid, req->r_callback, req->r_result);
		req->r_callback(req);
	}
}

/*
 * This is open-coded in handle_reply().
 */
static void complete_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	req->r_result = err;
	finish_request(req);
	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}
static void cancel_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
	erase_request_mc(&osdc->map_checks, req);
	ceph_osdc_put_request(req);
}
static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	cancel_map_check(req);
	finish_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}

static void abort_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	cancel_map_check(req);
	complete_request(req, err);
}
static void check_pool_dne(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (req->r_attempts) {
		/*
		 * We sent a request earlier, which means that
		 * previously the pool existed, and now it does not
		 * (i.e., it was deleted).
		 */
		req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
	}

	if (req->r_map_dne_bound) {
		if (map->epoch >= req->r_map_dne_bound) {
			/* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}
static void map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_request *req;
	u64 tid = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}

	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
	if (!req->r_map_dne_bound)
		req->r_map_dne_bound = greq->u.newest;
	erase_request_mc(&osdc->map_checks, req);
	check_pool_dne(req);

	ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}
static void send_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
	WARN_ON(ret);
}
/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}
static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}
static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	RB_CLEAR_NODE(&lreq->mc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	INIT_LIST_HEAD(&lreq->pending_lworks);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}
DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}
static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);

	erase_linger_osdc(&osdc->linger_requests, lreq);
	linger_put(lreq);
}
static void cancel_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}
{
2044 struct work_struct work
;
2045 struct ceph_osd_linger_request
*lreq
;
2046 struct list_head pending_item
;
2047 unsigned long queued_stamp
;
2053 void *payload
; /* points into @msg front */
2056 struct ceph_msg
*msg
; /* for ceph_msg_put() */
2064 static struct linger_work
*lwork_alloc(struct ceph_osd_linger_request
*lreq
,
2067 struct linger_work
*lwork
;
2069 lwork
= kzalloc(sizeof(*lwork
), GFP_NOIO
);
2073 INIT_WORK(&lwork
->work
, workfn
);
2074 INIT_LIST_HEAD(&lwork
->pending_item
);
2075 lwork
->lreq
= linger_get(lreq
);
2080 static void lwork_free(struct linger_work
*lwork
)
2082 struct ceph_osd_linger_request
*lreq
= lwork
->lreq
;
2084 mutex_lock(&lreq
->lock
);
2085 list_del(&lwork
->pending_item
);
2086 mutex_unlock(&lreq
->lock
);
static void lwork_queue(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_lreq_locked(lreq);
	WARN_ON(!list_empty(&lwork->pending_item));

	lwork->queued_stamp = jiffies;
	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
	queue_work(osdc->notify_wq, &lwork->work);
}
static void do_watch_notify(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	WARN_ON(!lreq->is_watch);
	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
	     lwork->notify.payload_len);
	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
		  lwork->notify.notifier_id, lwork->notify.payload,
		  lwork->notify.payload_len);

out:
	ceph_msg_put(lwork->notify.msg);
	lwork_free(lwork);
}
2128 static void do_watch_error(struct work_struct
*w
)
2130 struct linger_work
*lwork
= container_of(w
, struct linger_work
, work
);
2131 struct ceph_osd_linger_request
*lreq
= lwork
->lreq
;
2133 if (!linger_registered(lreq
)) {
2134 dout("%s lreq %p not registered\n", __func__
, lreq
);
2138 dout("%s lreq %p err %d\n", __func__
, lreq
, lwork
->error
.err
);
2139 lreq
->errcb(lreq
->data
, lreq
->linger_id
, lwork
->error
.err
);
2145 static void queue_watch_error(struct ceph_osd_linger_request
*lreq
)
2147 struct linger_work
*lwork
;
2149 lwork
= lwork_alloc(lreq
, do_watch_error
);
2151 pr_err("failed to allocate error-lwork\n");
2155 lwork
->error
.err
= lreq
->last_error
;

static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
				       int result)
{
	if (!completion_done(&lreq->reg_commit_wait)) {
		lreq->reg_commit_error = (result <= 0 ? result : 0);
		complete_all(&lreq->reg_commit_wait);
	}
}

static void linger_commit_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
	     lreq->linger_id, req->r_result);
	linger_reg_commit_complete(lreq, req->r_result);
	lreq->committed = true;

	if (!lreq->is_watch) {
		struct ceph_osd_data *osd_data =
		    osd_req_op_data(req, 0, notify, response_data);
		void *p = page_address(osd_data->pages[0]);

		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);

		/* make note of the notify_id */
		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
			lreq->notify_id = ceph_decode_64(&p);
			dout("lreq %p notify_id %llu\n", lreq,
			     lreq->notify_id);
		} else {
			dout("lreq %p no notify_id\n", lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}

static void linger_reconnect_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
	if (req->r_result < 0) {
		if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
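
/*
 * (Re)send the registration request for @lreq, reusing lreq->reg_req:
 * a WATCH or NOTIFY op for the initial registration, or a watch
 * RECONNECT with a bumped register_gen once the registration has
 * committed.  Called with osdc->lock held for write.
 */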
static void send_linger(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req = lreq->reg_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	verify_osdc_wrlocked(req->r_osdc);
	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = lreq->t.flags;
	req->r_mtime = lreq->mtime;

	mutex_lock(&lreq->lock);
	if (lreq->is_watch && lreq->committed) {
		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
			op->watch.cookie != lreq->linger_id);
		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
		op->watch.gen = ++lreq->register_gen;
		dout("lreq %p reconnect register_gen %u\n", lreq,
		     op->watch.gen);
		req->r_callback = linger_reconnect_cb;
	} else {
		if (!lreq->is_watch)
			lreq->notify_id = 0;
		else
			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
		dout("lreq %p register\n", lreq);
		req->r_callback = linger_commit_cb;
	}
	mutex_unlock(&lreq->lock);

	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	submit_request(req, true);
}

static void linger_ping_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
	     lreq->last_error);
	if (lreq->register_gen == req->r_ops[0].watch.gen) {
		if (!req->r_result) {
			lreq->watch_valid_thru = lreq->ping_sent;
		} else if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	} else {
		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
		     lreq->register_gen, req->r_ops[0].watch.gen);
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_request *req = lreq->ping_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("%s PAUSERD\n", __func__);
		return;
	}

	lreq->ping_sent = jiffies;
	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
	     lreq->register_gen);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	target_copy(&req->r_t, &lreq->t);

	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
		op->watch.cookie != lreq->linger_id ||
		op->watch.op != CEPH_OSD_WATCH_OP_PING);
	op->watch.gen = lreq->register_gen;
	req->r_callback = linger_ping_cb;
	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	ceph_osdc_get_request(req);
	account_request(req);
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(lreq->osd, req);
	send_request(req);
}
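
/*
 * Map @lreq to an OSD and send the initial registration.  Like
 * send_linger(), this runs with osdc->lock held for write.
 */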
static void linger_submit(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd *osd;

	calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
	osd = lookup_create_osd(osdc, lreq->t.osd, true);
	link_linger(osd, lreq);

	send_linger(lreq);
}

static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (!lookup_lreq)
		return;

	WARN_ON(lookup_lreq != lreq);
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	linger_put(lreq);
}

/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
	if (lreq->is_watch && lreq->ping_req->r_osd)
		cancel_linger_request(lreq->ping_req);
	if (lreq->reg_req->r_osd)
		cancel_linger_request(lreq->reg_req);
	cancel_linger_map_check(lreq);
	unlink_linger(lreq->osd, lreq);
	linger_unregister(lreq);
}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	down_write(&osdc->lock);
	if (__linger_registered(lreq))
		__linger_cancel(lreq);
	up_write(&osdc->lock);
}
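
/*
 * Pool existence checks.  If the pool that a linger request targets
 * disappears from the osdmap, the request must fail with -ENOENT.
 * Because full maps can be skipped, the monitor is asked for the
 * newest osdmap epoch to establish map_dne_bound, an epoch by which
 * the pool is known to be gone if it is still absent.
 */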
static void send_linger_map_check(struct ceph_osd_linger_request *lreq);

static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (lreq->register_gen) {
		lreq->map_dne_bound = map->epoch;
		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
		     lreq, lreq->linger_id);
	} else {
		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
		     map->epoch);
	}

	if (lreq->map_dne_bound) {
		if (map->epoch >= lreq->map_dne_bound) {
			/* we had a new enough map */
			pr_info("linger_id %llu pool does not exist\n",
				lreq->linger_id);
			linger_reg_commit_complete(lreq, -ENOENT);
			__linger_cancel(lreq);
		}
	} else {
		send_linger_map_check(lreq);
	}
}

static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_linger_request *lreq;
	u64 linger_id = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
	if (!lreq) {
		dout("%s linger_id %llu dne\n", __func__, linger_id);
		goto out_unlock;
	}

	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
	     greq->u.newest);
	if (!lreq->map_dne_bound)
		lreq->map_dne_bound = greq->u.newest;
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	check_linger_pool_dne(lreq);

	linger_put(lreq);
out_unlock:
	up_write(&osdc->lock);
}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (lookup_lreq) {
		WARN_ON(lookup_lreq != lreq);
		return;
	}

	linger_get(lreq);
	insert_linger_mc(&osdc->linger_map_checks, lreq);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  linger_map_check_cb, lreq->linger_id);
	WARN_ON(ret);
}

static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
	return ret ?: lreq->reg_commit_error;
}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
	return ret ?: lreq->notify_finish_error;
}

/*
 * Timeout callback, called every N seconds.  When 1 or more OSD
 * requests have been active for more than N seconds, we send a
 * keepalive (tag + timestamp) to its OSD to ensure any communications
 * channel reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_options *opts = osdc->client->options;
	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
	LIST_HEAD(slow_osds);
	struct rb_node *n, *p;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		bool found = false;

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_stamp, cutoff)) {
				dout(" req %p tid %llu on osd%d is laggy\n",
				     req, req->r_tid, osd->o_osd);
				found = true;
			}
			if (opts->osd_request_timeout &&
			    time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
				       req->r_tid, osd->o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
			struct ceph_osd_linger_request *lreq =
			    rb_entry(p, struct ceph_osd_linger_request, node);

			dout(" lreq %p linger_id %llu is served by osd%d\n",
			     lreq, lreq->linger_id, osd->o_osd);
			found = true;

			mutex_lock(&lreq->lock);
			if (lreq->is_watch && lreq->committed && !lreq->last_error)
				send_linger_ping(lreq);
			mutex_unlock(&lreq->lock);
		}

		if (found)
			list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}

	if (opts->osd_request_timeout) {
		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
				       req->r_tid, osdc->homeless_osd.o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
	}

	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
		maybe_request_map(osdc);

	while (!list_empty(&slow_osds)) {
		struct ceph_osd *osd = list_first_entry(&slow_osds,
							struct ceph_osd,
							o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
	struct ceph_osd *osd, *nosd;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;

		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
		close_osd(osd);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

static int ceph_oloc_decode(void **p, void *end,
			    struct ceph_object_locator *oloc)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret = 0;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_v < 3) {
		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	if (struct_cv > 6) {
		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	oloc->pool = ceph_decode_64(p);
	*p += 4; /* skip preferred */

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_object_locator::key is set\n");
		goto e_inval;
	}

	if (struct_v >= 5) {
		bool changed = false;

		len = ceph_decode_32(p);
		if (len > 0) {
			ceph_decode_need(p, end, len, e_inval);
			if (!oloc->pool_ns ||
			    ceph_compare_string(oloc->pool_ns, *p, len))
				changed = true;
			*p += len;
		} else {
			if (oloc->pool_ns)
				changed = true;
		}
		if (changed) {
			/* redirect changes namespace */
			pr_warn("ceph_object_locator::nspace is changed\n");
			goto e_inval;
		}
	}

	if (struct_v >= 6) {
		s64 hash = ceph_decode_64(p);

		if (hash != -1) {
			pr_warn("ceph_object_locator::hash is set\n");
			goto e_inval;
		}
	}

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

static int ceph_redirect_decode(void **p, void *end,
				struct ceph_request_redirect *redir)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_cv > 1) {
		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	ret = ceph_oloc_decode(p, end, &redir->oloc);
	if (ret)
		goto out;

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_request_redirect::object_name is set\n");
		goto e_inval;
	}

	len = ceph_decode_32(p);
	*p += len; /* skip osd_instructions */

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
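
/*
 * Fields of an MOSDOpReply that the client cares about, decoded from
 * the wire format below.
 */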
struct MOSDOpReply {
	struct ceph_pg pgid;
	u64 flags;
	int result;
	u32 epoch;
	int num_ops;
	u32 outdata_len[CEPH_OSD_MAX_OPS];
	s32 rval[CEPH_OSD_MAX_OPS];
	int retry_attempt;
	struct ceph_eversion replay_version;
	u64 user_version;
	struct ceph_request_redirect redirect;
};

static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u16 version = le16_to_cpu(msg->hdr.version);
	struct ceph_eversion bad_replay_version;
	u8 decode_redir;
	u32 len;
	int ret;
	int i;

	ceph_decode_32_safe(&p, end, len, e_inval);
	ceph_decode_need(&p, end, len, e_inval);
	p += len; /* skip oid */

	ret = ceph_decode_pgid(&p, end, &m->pgid);
	if (ret)
		return ret;

	ceph_decode_64_safe(&p, end, m->flags, e_inval);
	ceph_decode_32_safe(&p, end, m->result, e_inval);
	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
	p += sizeof(bad_replay_version);
	ceph_decode_32_safe(&p, end, m->epoch, e_inval);

	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
		goto e_inval;

	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
			 e_inval);
	for (i = 0; i < m->num_ops; i++) {
		struct ceph_osd_op *op = p;

		m->outdata_len[i] = le32_to_cpu(op->payload_len);
		p += sizeof(*op);
	}

	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
	for (i = 0; i < m->num_ops; i++)
		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);

	if (version >= 5) {
		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
		memcpy(&m->replay_version, p, sizeof(m->replay_version));
		p += sizeof(m->replay_version);
		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
	} else {
		m->replay_version = bad_replay_version; /* struct */
		m->user_version = le64_to_cpu(m->replay_version.version);
	}

	if (version >= 6) {
		if (version >= 7)
			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
		else
			decode_redir = 1;
	} else {
		decode_redir = 0;
	}

	if (decode_redir) {
		ret = ceph_redirect_decode(&p, end, &m->redirect);
		if (ret)
			return ret;
	} else {
		ceph_oloc_init(&m->redirect.oloc);
	}

	return 0;

e_inval:
	return -EINVAL;
}

/*
 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
 * specified.
 */
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_osd_request *req;
	struct MOSDOpReply m;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 data_len = 0;
	int ret;
	int i;

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
		goto out_unlock_session;
	}

	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
	ret = decode_MOSDOpReply(msg, &m);
	m.redirect.oloc.pool_ns = NULL;
	if (ret) {
		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
		       req->r_tid, ret);
		ceph_msg_dump(msg);
		goto fail_request;
	}
	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
	     le64_to_cpu(m.replay_version.version), m.user_version);

	if (m.retry_attempt >= 0) {
		if (m.retry_attempt != req->r_attempts - 1) {
			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
			     req, req->r_tid, m.retry_attempt,
			     req->r_attempts - 1);
			goto out_unlock_session;
		}
	} else {
		WARN_ON(1); /* MOSDOpReply v4 is assumed */
	}

	if (!ceph_oloc_empty(&m.redirect.oloc)) {
		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
		     m.redirect.oloc.pool);
		unlink_request(osd, req);
		mutex_unlock(&osd->lock);

		/*
		 * Not ceph_oloc_copy() - changing pool_ns is not
		 * supported.
		 */
		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
		req->r_tid = 0;
		__submit_request(req, false);
		goto out_unlock_osdc;
	}

	if (m.num_ops != req->r_num_ops) {
		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
		       req->r_num_ops, req->r_tid);
		goto fail_request;
	}
	for (i = 0; i < req->r_num_ops; i++) {
		dout(" req %p tid %llu op %d rval %d len %u\n", req,
		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
		req->r_ops[i].rval = m.rval[i];
		req->r_ops[i].outdata_len = m.outdata_len[i];
		data_len += m.outdata_len[i];
	}
	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
		goto fail_request;
	}
	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
	     req, req->r_tid, m.result, data_len);

	/*
	 * Since we only ever request ONDISK, we should only ever get
	 * one (type of) reply back.
	 */
	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
	req->r_result = m.result ?: data_len;
	finish_request(req);
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);

	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
	return;

fail_request:
	complete_request(req, -EIO);
out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
}

static void set_pool_was_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		pi->was_full = __pool_full(pi);
	}
}

static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return pi->was_full && !__pool_full(pi);
}

static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	enum calc_target_result ct_res;

	ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
	if (ct_res == CALC_TARGET_NEED_RESEND) {
		struct ceph_osd *osd;

		osd = lookup_create_osd(osdc, lreq->t.osd, true);
		if (osd != lreq->osd) {
			unlink_linger(lreq->osd, lreq);
			link_linger(osd, lreq);
		}
	}

	return ct_res;
}

/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
			  bool force_resend,
			  bool cleared_full,
			  bool check_pool_cleared_full,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;
	bool force_resend_writes;

	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* recalc_linger_target() */

		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
		     lreq->linger_id);
		ct_res = recalc_linger_target(lreq);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
			if (!force_resend && !force_resend_writes)
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_linger_map_check(lreq);
			/*
			 * scan_requests() for the previous epoch(s)
			 * may have already added it to the list, since
			 * it's not unlinked here.
			 */
			if (list_empty(&lreq->scan_item))
				list_add_tail(&lreq->scan_item, need_resend_linger);
			break;
		case CALC_TARGET_POOL_DNE:
			check_linger_pool_dne(lreq);
			break;
		}
	}

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* unlink_request(), check_pool_dne() */

		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
		ct_res = calc_target(osdc, &req->r_t,
				     &req->r_last_force_resend, false);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
			if (!force_resend &&
			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
			     !force_resend_writes))
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_map_check(req);
			unlink_request(osd, req);
			insert_request(need_resend, req);
			break;
		case CALC_TARGET_POOL_DNE:
			check_pool_dne(req);
			break;
		}
	}
}
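
/*
 * Decode and apply one incremental or full osdmap, then rescan every
 * known OSD (including the homeless one) for requests and linger
 * requests whose mapping changed, collecting them in @need_resend and
 * @need_resend_linger for kick_requests().
 */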
static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}

static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	struct rb_node *n;

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */

		WARN_ON(req->r_osd);
		calc_target(osdc, &req->r_t, NULL, false);
		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}

/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}

/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    list_first_entry_or_null(&msg->data,
						     struct ceph_msg_data,
						     links);

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
				} else {
					ceph_release_page_vector(data->pages,
					       calc_pages_for(0, data->length));
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);

static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}

/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	lreq->mtime = CURRENT_TIME;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_linger_request(lreq);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_WATCH);
	osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_PING);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
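
/*
 * Example usage - a sketch only, with error handling elided;
 * my_watch_cb, my_watch_err, my_data, oid and oloc are all
 * caller-supplied:
 *
 *	struct ceph_osd_linger_request *handle;
 *
 *	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_watch_cb,
 *				 my_watch_err, my_data);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_unwatch(osdc, handle);    (releases the handle's ref)
 */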

/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	req->r_mtime = CURRENT_TIME;
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);

static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 size_t payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);

static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     size_t payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
				     timeout, payload, payload_len);
	if (ret) {
		linger_unregister(lreq);
		up_write(&osdc->lock);
		ceph_release_page_vector(pages, 1);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
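
/*
 * Note: a notify only completes once every watcher has acknowledged it
 * or the @timeout handed to the OSD has expired; on success the raw
 * reply payload, if requested, is returned via @preply_pages.
 */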

/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}

static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_copy(p, &item->name, sizeof(item->name));
	item->cookie = ceph_decode_64(p);
	*p += 4; /* skip timeout_seconds */
	if (struct_v >= 2) {
		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
		ceph_decode_addr(&item->addr);
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr.in_addr));
	return 0;
}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}

/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);

/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_page)
		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
						   *resp_len, 0, false, false);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_page)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
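
/*
 * Example - a sketch only, with error handling elided; the
 * "lock"/"get_info" class and method names and the single-page reply
 * are illustrative:
 *
 *	struct page *reply_page = alloc_page(GFP_NOIO);
 *	size_t reply_len = PAGE_SIZE;
 *	int ret;
 *
 *	ret = ceph_osdc_call(osdc, &oid, &oloc, "lock", "get_info",
 *			     CEPH_OSD_FLAG_READ, NULL, 0,
 *			     reply_page, &reply_len);
 *	...
 *	__free_page(reply_page);
 */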

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, 10, true, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, 10, true, "osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
					 false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

int ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(ceph_osdc_setup);

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}
EXPORT_SYMBOL(ceph_osdc_cleanup);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}

/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}

/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.fault = osd_fault,
};