// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>
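
/*
 * This file covers decoding of the Ceph OSD map (full and incremental),
 * the in-kernel CRUSH map decoder, and the object -> PG -> OSD mapping
 * pipeline used by the OSD client.
 */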
void osdmap_info(const struct ceph_osdmap *map, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	printk(KERN_INFO "%s (%pU e%u): %pV", KBUILD_MODNAME, &map->fsid,
	       map->epoch, &vaf);

	va_end(args);
}
char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}
static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
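
/*
 * Decoders for the per-algorithm CRUSH bucket payloads.  crush_decode()
 * first fills in the common struct crush_bucket header, then calls one
 * of these to decode the weight arrays that follow it on the wire.
 */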
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;

	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
struct crush_name_node {
	struct rb_node cn_node;
	u64 cn_id;
	char cn_name[];
};

static struct crush_name_node *alloc_crush_name(size_t name_len)
{
	struct crush_name_node *cn;

	cn = kmalloc(sizeof(*cn) + name_len + 1, GFP_NOIO);
	if (!cn)
		return NULL;

	RB_CLEAR_NODE(&cn->cn_node);
	return cn;
}

static void free_crush_name(struct crush_name_node *cn)
{
	WARN_ON(!RB_EMPTY_NODE(&cn->cn_node));

	kfree(cn);
}

DEFINE_RB_FUNCS(crush_name, struct crush_name_node, cn_id, cn_node)
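
/*
 * The type_name and name maps are encoded as a u32 count followed by
 * (u32 id, length-prefixed string) pairs; decode_crush_names() turns
 * that into an rbtree of crush_name_node keyed by id.
 */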
static int decode_crush_names(void **p, void *end, struct rb_root *root)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct crush_name_node *cn;
		int id;
		u32 name_len;

		ceph_decode_32_safe(p, end, id, e_inval);
		ceph_decode_32_safe(p, end, name_len, e_inval);
		ceph_decode_need(p, end, name_len, e_inval);

		cn = alloc_crush_name(name_len);
		if (!cn)
			return -ENOMEM;

		cn->cn_id = id;
		memcpy(cn->cn_name, *p, name_len);
		cn->cn_name[name_len] = '\0';
		*p += name_len;

		if (!__insert_crush_name(root, cn)) {
			free_crush_name(cn);
			return -EEXIST;
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}
void clear_crush_names(struct rb_root *root)
{
	while (!RB_EMPTY_ROOT(root)) {
		struct crush_name_node *cn =
		    rb_entry(rb_first(root), struct crush_name_node, cn_node);

		erase_crush_name(root, cn);
		free_crush_name(cn);
	}
}
static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}
DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
		node)

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
		    rb_entry(rb_first(&c->choose_args),
			     struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}
static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret = -EINVAL;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
fail:
	kfree(a);
	return ERR_PTR(ret);
}
/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
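
/*
 * choose_args section layout, as consumed below: a u32 count of
 * choose_arg_maps, each carrying a u64 choose_args_index and then a
 * u32 count of per-bucket entries of (u32 bucket_index,
 * crush_choose_arg).  Each map's args array is sized to
 * c->max_buckets so bucket_index can be used as a direct index.
 */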
static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;

			if (arg->ids_size &&
			    arg->ids_size != c->buckets[bucket_index]->size)
				goto e_inval;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}
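
/*
 * Precompute the size of the scratch ("working") memory that
 * crush_do_rule() needs for this map: one crush_work header plus a
 * crush_work_bucket and a permutation array per bucket.  The result
 * is what alloc_workspace() later feeds to crush_work_size().
 */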
static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}
		/* Every bucket has a permutation array. */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}
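
/*
 * Wire format handled by crush_decode(): magic, max_buckets,
 * max_rules, max_devices, the bucket array, the rule array, the
 * type_name/name/rule_name maps, then optional tunables, device class
 * information and choose_args.  Older encodings simply omit the
 * trailing sections, which is why the tunable decodes below bail to
 * the "done" label once *p runs out.
 */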
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->type_names = RB_ROOT;
	c->names = RB_ROOT;
	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				(struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto fail;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
		c->rules[i] = r;
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	err = decode_crush_names(p, end, &c->type_names);
	if (err)
		goto fail;

	err = decode_crush_names(p, end, &c->names);
	if (err)
		goto fail;

	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto fail;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
fail:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);

bad:
	err = -EINVAL;
	goto fail;
}
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}
int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}
static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}
/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)

/*
 * rbtree of pg pool info
 */
DEFINE_RB_FUNCS(pg_pool, struct ceph_pg_pool_info, id, node)
struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return lookup_pg_pool(&map->pg_pools, id);
}
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = lookup_pg_pool(&map->pg_pools, id);
	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);
u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	pi = lookup_pg_pool(&map->pg_pools, id);
	return pi ? pi->flags : 0;
}
EXPORT_SYMBOL(ceph_pg_pool_flags);
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	erase_pg_pool(root, pi);
	kfree(pi->name);
	kfree(pi);
}
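
/*
 * decode_pool() below is versioned: ev/cv are the encoding and compat
 * versions of the pool struct on the wire.  Fields the kernel client
 * doesn't care about are skipped by advancing *p, and anything past
 * what we understand is ignored by jumping to pool_end.
 */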
static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p); /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;  /* snapid key */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4; /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8; /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4; /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4; /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1; /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1; /* skip fast_read */

	if (ev >= 23) {
		*p += 4; /* skip hit_set_grade_decay_rate */
		*p += 4; /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout("  pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}
/*
 * workspace_manager framework borrowed from fs/btrfs/compression.c.
 * Two simplifications: there is only one type of workspace and there
 * is always at least one workspace.
 */
static struct crush_work *alloc_workspace(const struct crush_map *c)
{
	struct crush_work *work;
	size_t work_size;

	WARN_ON(!c->working_size);
	work_size = crush_work_size(c, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);

	work = kvmalloc(work_size, GFP_NOIO);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->item);
	crush_init_workspace(c, work);
	return work;
}
static void free_workspace(struct crush_work *work)
{
	WARN_ON(!list_empty(&work->item));
	kvfree(work);
}
static void init_workspace_manager(struct workspace_manager *wsm)
{
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	wsm->free_ws = 0;
	init_waitqueue_head(&wsm->ws_wait);
}
static void add_initial_workspace(struct workspace_manager *wsm,
				  struct crush_work *work)
{
	WARN_ON(!list_empty(&wsm->idle_ws));

	list_add(&work->item, &wsm->idle_ws);
	atomic_set(&wsm->total_ws, 1);
	wsm->free_ws = 1;
}
static void cleanup_workspace_manager(struct workspace_manager *wsm)
{
	struct crush_work *work;

	while (!list_empty(&wsm->idle_ws)) {
		work = list_first_entry(&wsm->idle_ws, struct crush_work,
					item);
		list_del_init(&work->item);
		free_workspace(work);
	}
	atomic_set(&wsm->total_ws, 0);
	wsm->free_ws = 0;
}
/*
 * Finds an available workspace or allocates a new one.  If it's not
 * possible to allocate a new one, waits until there is one.
 */
static struct crush_work *get_workspace(struct workspace_manager *wsm,
					const struct crush_map *c)
{
	struct crush_work *work;
	int cpus = num_online_cpus();

again:
	spin_lock(&wsm->ws_lock);
	if (!list_empty(&wsm->idle_ws)) {
		work = list_first_entry(&wsm->idle_ws, struct crush_work,
					item);
		list_del_init(&work->item);
		wsm->free_ws--;
		spin_unlock(&wsm->ws_lock);
		return work;
	}

	if (atomic_read(&wsm->total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(&wsm->ws_lock);
		prepare_to_wait(&wsm->ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&wsm->total_ws) > cpus && !wsm->free_ws)
			schedule();
		finish_wait(&wsm->ws_wait, &wait);
		goto again;
	}
	atomic_inc(&wsm->total_ws);
	spin_unlock(&wsm->ws_lock);

	work = alloc_workspace(c);
	if (!work) {
		atomic_dec(&wsm->total_ws);
		wake_up(&wsm->ws_wait);

		/*
		 * Do not return the error but go back to waiting.  We
		 * have the initial workspace and the CRUSH computation
		 * time is bounded so we will get it eventually.
		 */
		WARN_ON(atomic_read(&wsm->total_ws) < 1);
		goto again;
	}

	return work;
}
/*
 * Puts a workspace back on the list or frees it if we have enough
 * idle ones sitting around.
 */
static void put_workspace(struct workspace_manager *wsm,
			  struct crush_work *work)
{
	spin_lock(&wsm->ws_lock);
	if (wsm->free_ws <= num_online_cpus()) {
		list_add(&work->item, &wsm->idle_ws);
		wsm->free_ws++;
		spin_unlock(&wsm->ws_lock);
		goto wake;
	}
	spin_unlock(&wsm->ws_lock);

	free_workspace(work);
	atomic_dec(&wsm->total_ws);
wake:
	if (wq_has_sleeper(&wsm->ws_wait))
		wake_up(&wsm->ws_wait);
}
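
/*
 * Typical usage, mirroring do_crush() further down:
 *
 *	work = get_workspace(&map->crush_wsm, map->crush);
 *	crush_do_rule(map->crush, ..., work, ...);
 *	put_workspace(&map->crush_wsm, work);
 */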
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;

	init_workspace_manager(&map->crush_wsm);

	return map;
}
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);

	if (map->crush)
		crush_destroy(map->crush);
	cleanup_workspace_manager(&map->crush_wsm);

	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_temp),
			     struct ceph_pg_mapping, node);

		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->primary_temp),
			     struct ceph_pg_mapping, node);

		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_upmap),
			     struct ceph_pg_mapping, node);

		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_upmap_items),
			     struct ceph_pg_mapping, node);

		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(rb_first(&map->pg_pools),
			     struct ceph_pg_pool_info, node);

		__remove_pg_pool(&map->pg_pools, pi);
	}
	kvfree(map->osd_state);
	kvfree(map->osd_weight);
	kvfree(map->osd_addr);
	kvfree(map->osd_primary_affinity);
	kfree(map);
}
/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	u32 to_copy;
	int i;

	dout("%s old %u new %u\n", __func__, map->max_osd, max);
	if (max == map->max_osd)
		return 0;

	state = kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
	weight = kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
	addr = kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
	if (!state || !weight || !addr) {
		kvfree(state);
		kvfree(weight);
		kvfree(addr);
		return -ENOMEM;
	}

	to_copy = min(map->max_osd, max);
	if (map->osd_state) {
		memcpy(state, map->osd_state, to_copy * sizeof(*state));
		memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
		memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
		kvfree(map->osd_state);
		kvfree(map->osd_weight);
		kvfree(map->osd_addr);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = kvmalloc(array_size(max, sizeof(*affinity)),
				    GFP_NOFS);
		if (!affinity)
			return -ENOMEM;

		memcpy(affinity, map->osd_primary_affinity,
		       to_copy * sizeof(*affinity));
		kvfree(map->osd_primary_affinity);

		map->osd_primary_affinity = affinity;
		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}
static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	struct crush_work *work;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work = alloc_workspace(crush);
	if (!work) {
		crush_destroy(crush);
		return -ENOMEM;
	}

	if (map->crush)
		crush_destroy(map->crush);
	cleanup_workspace_manager(&map->crush_wsm);
	map->crush = crush;
	add_initial_workspace(&map->crush_wsm, work);
	return 0;
}
#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			RB_CLEAR_NODE(&pi->node);
			pi->id = pool;

			if (!__insert_pg_pool(&map->pg_pools, pi)) {
				kfree(pi);
				return -EEXIST;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}
typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}
static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}
static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}
static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						     bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}
static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}
u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}
static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kvmalloc(
		    array_size(map->max_osd, sizeof(*map->osd_primary_affinity)),
		    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}
static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kvfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		osdmap_info(map, "osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}
static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}
static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}
static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
							bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(2 * len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}
static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}
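
/*
 * Full map sections, in decode order: fsid/epoch/created/modified,
 * pools, pool names, pool_max, flags, max_osd and the per-osd
 * state/weight/addr arrays, pg_temp, primary_temp (v1+),
 * primary_affinity (v2+), the embedded CRUSH map, erasure code
 * profiles (v3+) and pg_upmap[_items] (v4+).
 */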
/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, bool msgr2,
			 struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(struct_v >= 5 ? sizeof(u32) :
						       sizeof(u8)) +
			 map->max_osd*sizeof(*map->osd_weight), e_inval);
	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++) {
		struct ceph_entity_addr *addr = &map->osd_addr[i];

		if (struct_v >= 8)
			err = ceph_decode_entity_addrvec(p, end, msgr2, addr);
		else
			err = ceph_decode_entity_addr(p, end, addr);
		if (err)
			goto bad;

		dout("%s osd%d addr %s\n", __func__, i, ceph_pr_addr(addr));
	}

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;

	*p += len;
	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}
/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, msgr2, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}
/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
				      bool msgr2, struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;
	int ret;
	int i;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	for (i = 0; i < len; ++i) {
		struct ceph_entity_addr addr;

		ceph_decode_skip_32(p, end, e_inval);
		if (struct_v >= 7)
			ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
		else
			ret = ceph_decode_entity_addr(p, end, &addr);
		if (ret)
			return ret;
	}

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		osdmap_info(map, "osd%d weight 0x%x %s\n", osd, w,
			    w == CEPH_OSD_IN ? "(in)" :
			    (w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u32 xorstate;

		osd = ceph_decode_32(p);
		if (struct_v >= 5)
			xorstate = ceph_decode_32(p);
		else
			xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			osdmap_info(map, "osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			osdmap_info(map, "osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
				       CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		if (struct_v >= 7)
			ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
		else
			ret = ceph_decode_entity_addr(p, end, &addr);
		if (ret)
			return ret;

		dout("%s osd%d addr %s\n", __func__, osd, ceph_pr_addr(&addr));

		osdmap_info(map, "osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
					     struct ceph_osdmap *map)
{
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end), msgr2);
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		err = osdmap_set_crush(map,
				       crush_decode(*p, min(*p + len, end)));
		if (err)
			goto bad;
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, struct_v, msgr2, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	if (struct_v >= 3) {
		/* new_erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
		/* old_erasure_code_profiles */
		ceph_decode_skip_set(p, end, string, e_inval);
	}

	if (struct_v >= 4) {
		err = decode_new_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_new_pg_upmap_items(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return ERR_PTR(err);
}
void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src)
{
	ceph_oloc_destroy(dest);

	dest->pool = src->pool;
	if (src->pool_ns)
		dest->pool_ns = ceph_get_string(src->pool_ns);
	else
		dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);
void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
	ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);
void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	ceph_oid_destroy(dest);

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	} else {
		dest->name = dest->inline_name;
	}
	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);
static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}
/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);
static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}
/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);
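
/*
 * A typical caller formats an object name in place, e.g. (hypothetical
 * values):
 *
 *	ceph_oid_init(&oid);
 *	ret = ceph_oid_aprintf(&oid, GFP_NOIO, "%llx.%08llx", ino, bno);
 *
 * Short names land in oid->inline_name; only oversized ones allocate.
 */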
void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);
static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}
static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}
*set
)
2261 if (set
->size
> 0 && set
->primary
>= 0)
2264 /* empty can_shift_osds set */
2265 if (!set
->size
&& set
->primary
== -1)
2268 /* empty !can_shift_osds set - all NONE */
2269 if (set
->size
> 0 && set
->primary
== -1) {
2272 for (i
= 0; i
< set
->size
; i
++) {
2273 if (set
->osds
[i
] != CRUSH_ITEM_NONE
)
2283 void ceph_osds_copy(struct ceph_osds
*dest
, const struct ceph_osds
*src
)
2285 memcpy(dest
->osds
, src
->osds
, src
->size
* sizeof(src
->osds
[0]));
2286 dest
->size
= src
->size
;
2287 dest
->primary
= src
->primary
;
bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
		      u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}
*old_acting
,
2319 const struct ceph_osds
*new_acting
,
2320 const struct ceph_osds
*old_up
,
2321 const struct ceph_osds
*new_up
,
2328 bool old_sort_bitwise
,
2329 bool new_sort_bitwise
,
2330 bool old_recovery_deletes
,
2331 bool new_recovery_deletes
,
2332 const struct ceph_pg
*pgid
)
2334 return !osds_equal(old_acting
, new_acting
) ||
2335 !osds_equal(old_up
, new_up
) ||
2336 old_size
!= new_size
||
2337 old_min_size
!= new_min_size
||
2338 ceph_pg_is_split(pgid
, old_pg_num
, new_pg_num
) ||
2339 old_sort_bitwise
!= new_sort_bitwise
||
2340 old_recovery_deletes
!= new_recovery_deletes
;
2343 static int calc_pg_rank(int osd
, const struct ceph_osds
*acting
)
2347 for (i
= 0; i
< acting
->size
; i
++) {
2348 if (acting
->osds
[i
] == osd
)
2355 static bool primary_changed(const struct ceph_osds
*old_acting
,
2356 const struct ceph_osds
*new_acting
)
2358 if (!old_acting
->size
&& !new_acting
->size
)
2359 return false; /* both still empty */
2361 if (!old_acting
->size
^ !new_acting
->size
)
2362 return true; /* was empty, now not, or vice versa */
2364 if (old_acting
->primary
!= new_acting
->primary
)
2365 return true; /* primary changed */
2367 if (calc_pg_rank(old_acting
->primary
, old_acting
) !=
2368 calc_pg_rank(new_acting
->primary
, new_acting
))
2371 return false; /* same primary (tho replicas may have changed) */
bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}
2390 * Should only be called with target_oid and target_oloc (as opposed to
2391 * base_oid and base_oloc), since tiering isn't taken into account.
2393 void __ceph_object_locator_to_pg(struct ceph_pg_pool_info
*pi
,
2394 const struct ceph_object_id
*oid
,
2395 const struct ceph_object_locator
*oloc
,
2396 struct ceph_pg
*raw_pgid
)
2398 WARN_ON(pi
->id
!= oloc
->pool
);
2400 if (!oloc
->pool_ns
) {
2401 raw_pgid
->pool
= oloc
->pool
;
2402 raw_pgid
->seed
= ceph_str_hash(pi
->object_hash
, oid
->name
,
2404 dout("%s %s -> raw_pgid %llu.%x\n", __func__
, oid
->name
,
2405 raw_pgid
->pool
, raw_pgid
->seed
);
2407 char stack_buf
[256];
2408 char *buf
= stack_buf
;
2409 int nsl
= oloc
->pool_ns
->len
;
2410 size_t total
= nsl
+ 1 + oid
->name_len
;
2412 if (total
> sizeof(stack_buf
))
2413 buf
= kmalloc(total
, GFP_NOIO
| __GFP_NOFAIL
);
2414 memcpy(buf
, oloc
->pool_ns
->str
, nsl
);
2416 memcpy(buf
+ nsl
+ 1, oid
->name
, oid
->name_len
);
2417 raw_pgid
->pool
= oloc
->pool
;
2418 raw_pgid
->seed
= ceph_str_hash(pi
->object_hash
, buf
, total
);
2419 if (buf
!= stack_buf
)
2421 dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__
,
2422 oid
->name
, nsl
, oloc
->pool_ns
->str
,
2423 raw_pgid
->pool
, raw_pgid
->seed
);
2427 int ceph_object_locator_to_pg(struct ceph_osdmap
*osdmap
,
2428 const struct ceph_object_id
*oid
,
2429 const struct ceph_object_locator
*oloc
,
2430 struct ceph_pg
*raw_pgid
)
2432 struct ceph_pg_pool_info
*pi
;
2434 pi
= ceph_pg_pool_by_id(osdmap
, oloc
->pool
);
2438 __ceph_object_locator_to_pg(pi
, oid
, oloc
, raw_pgid
);
2441 EXPORT_SYMBOL(ceph_object_locator_to_pg
);
/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}
/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}
/*
 * Magic value used for a "default" fallback choose_args, used if the
 * crush_choose_arg_map passed to do_crush() does not exist.  If this
 * also doesn't exist, fall back to canonical weights.
 */
#define CEPH_DEFAULT_CHOOSE_ARGS	-1
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    s64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	struct crush_work *work;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);
	if (!arg_map)
		arg_map = lookup_choose_arg_map(&map->crush->choose_args,
						CEPH_DEFAULT_CHOOSE_ARGS);

	work = get_workspace(&map->crush_wsm, map->crush);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, work,
			  arg_map ? arg_map->args : NULL);
	put_workspace(&map->crush_wsm, work);

	return r;
}
*osdmap
,
2516 struct ceph_pg_pool_info
*pi
,
2517 struct ceph_osds
*set
)
2521 if (ceph_can_shift_osds(pi
)) {
2525 for (i
= 0; i
< set
->size
; i
++) {
2526 if (!ceph_osd_exists(osdmap
, set
->osds
[i
])) {
2531 set
->osds
[i
- removed
] = set
->osds
[i
];
2533 set
->size
-= removed
;
2535 /* set dne devices to NONE */
2536 for (i
= 0; i
< set
->size
; i
++) {
2537 if (!ceph_osd_exists(osdmap
, set
->osds
[i
]))
2538 set
->osds
[i
] = CRUSH_ITEM_NONE
;
2544 * Calculate raw set (CRUSH output) for given PG and filter out
2545 * nonexistent OSDs. ->primary is undefined for a raw set.
2547 * Placement seed (CRUSH input) is returned through @ppps.
2549 static void pg_to_raw_osds(struct ceph_osdmap
*osdmap
,
2550 struct ceph_pg_pool_info
*pi
,
2551 const struct ceph_pg
*raw_pgid
,
2552 struct ceph_osds
*raw
,
2555 u32 pps
= raw_pg_to_pps(pi
, raw_pgid
);
2559 ceph_osds_init(raw
);
2563 ruleno
= crush_find_rule(osdmap
->crush
, pi
->crush_ruleset
, pi
->type
,
2566 pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
2567 pi
->id
, pi
->crush_ruleset
, pi
->type
, pi
->size
);
2571 if (pi
->size
> ARRAY_SIZE(raw
->osds
)) {
2572 pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
2573 pi
->id
, pi
->crush_ruleset
, pi
->type
, pi
->size
,
2574 ARRAY_SIZE(raw
->osds
));
2578 len
= do_crush(osdmap
, ruleno
, pps
, raw
->osds
, pi
->size
,
2579 osdmap
->osd_weight
, osdmap
->max_osd
, pi
->id
);
2581 pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
2582 len
, ruleno
, pi
->id
, pi
->crush_ruleset
, pi
->type
,
2588 remove_nonexistent_osds(osdmap
, pi
, raw
);
2591 /* apply pg_upmap[_items] mappings */
2592 static void apply_upmap(struct ceph_osdmap
*osdmap
,
2593 const struct ceph_pg
*pgid
,
2594 struct ceph_osds
*raw
)
2596 struct ceph_pg_mapping
*pg
;
2599 pg
= lookup_pg_mapping(&osdmap
->pg_upmap
, pgid
);
2601 /* make sure targets aren't marked out */
2602 for (i
= 0; i
< pg
->pg_upmap
.len
; i
++) {
2603 int osd
= pg
->pg_upmap
.osds
[i
];
2605 if (osd
!= CRUSH_ITEM_NONE
&&
2606 osd
< osdmap
->max_osd
&&
2607 osdmap
->osd_weight
[osd
] == 0) {
2608 /* reject/ignore explicit mapping */
2612 for (i
= 0; i
< pg
->pg_upmap
.len
; i
++)
2613 raw
->osds
[i
] = pg
->pg_upmap
.osds
[i
];
2614 raw
->size
= pg
->pg_upmap
.len
;
2615 /* check and apply pg_upmap_items, if any */
2618 pg
= lookup_pg_mapping(&osdmap
->pg_upmap_items
, pgid
);
2621 * Note: this approach does not allow a bidirectional swap,
2622 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
2624 for (i
= 0; i
< pg
->pg_upmap_items
.len
; i
++) {
2625 int from
= pg
->pg_upmap_items
.from_to
[i
][0];
2626 int to
= pg
->pg_upmap_items
.from_to
[i
][1];
2628 bool exists
= false;
2630 /* make sure replacement doesn't already appear */
2631 for (j
= 0; j
< raw
->size
; j
++) {
2632 int osd
= raw
->osds
[j
];
2638 /* ignore mapping if target is marked out */
2639 if (osd
== from
&& pos
< 0 &&
2640 !(to
!= CRUSH_ITEM_NONE
&&
2641 to
< osdmap
->max_osd
&&
2642 osdmap
->osd_weight
[to
] == 0)) {
2646 if (!exists
&& pos
>= 0)
2647 raw
->osds
[pos
] = to
;
/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/dne devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}
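
/*
 * The rejection test below compares a 16-bit slice of
 * crush_hash32_2(pps, osd) against aff, where
 * CEPH_OSD_MAX_PRIMARY_AFFINITY is 0x10000: an osd with affinity
 * 0x8000 is skipped as primary for roughly half of its PGs, and one
 * at the maximum is never skipped.
 */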
static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}
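
/*
 * A sketch of the math in the rejection test above: the upper 16 bits
 * of crush_hash32_2() act as a uniform value in [0, 0xffff], so an
 * OSD with affinity aff keeps roughly
 * aff / CEPH_OSD_MAX_PRIMARY_AFFINITY of the primary roles it would
 * otherwise get.  Assuming a hypothetical aff of 0x8000:
 *
 *	u32 r = crush_hash32_2(CRUSH_HASH_RJENKINS1, pps, osd) >> 16;
 *	bool keep = r < 0x8000;	// true for ~50% of (pps, osd) pairs
 */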
/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}
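
/*
 * Note that a PG with only a primary_temp mapping leaves @temp with
 * ->size == 0 but ->primary >= 0, which is why the caller below
 * checks the acting set's size and its primary separately.
 */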
/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}
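
/*
 * A minimal usage sketch: the pipeline above runs CRUSH -> upmap ->
 * up set -> primary affinity -> temp overrides.  Assuming @raw_pgid
 * was derived from the object locator beforehand:
 *
 *	struct ceph_osds up, acting;
 *
 *	ceph_pg_to_up_acting_osds(osdmap, pi, &raw_pgid, &up, &acting);
 *	// acting is used for I/O; up is recorded to detect
 *	// interval changes
 */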
bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid; /* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid; /* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}
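
/*
 * For erasure-coded pools the shard id is the primary's position in
 * the acting set, which the loop above recovers by matching
 * acting.primary; replicated pools always use CEPH_SPG_NOSHARD.
 */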
/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
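
/*
 * A minimal usage sketch (@raw_pgid assumed to have been computed
 * elsewhere):
 *
 *	int osd = ceph_pg_to_acting_primary(osdmap, &raw_pgid);
 *	if (osd < 0)
 *		return;	// pool doesn't exist or PG has no primary
 */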
static struct crush_loc_node *alloc_crush_loc(size_t type_name_len,
					      size_t name_len)
{
	struct crush_loc_node *loc;

	loc = kmalloc(sizeof(*loc) + type_name_len + name_len + 2, GFP_NOIO);
	if (!loc)
		return NULL;

	RB_CLEAR_NODE(&loc->cl_node);
	return loc;
}

static void free_crush_loc(struct crush_loc_node *loc)
{
	WARN_ON(!RB_EMPTY_NODE(&loc->cl_node));

	kfree(loc);
}
static int crush_loc_compare(const struct crush_loc *loc1,
			     const struct crush_loc *loc2)
{
	return strcmp(loc1->cl_type_name, loc2->cl_type_name) ?:
	       strcmp(loc1->cl_name, loc2->cl_name);
}

DEFINE_RB_FUNCS2(crush_loc, struct crush_loc_node, cl_loc, crush_loc_compare,
		 RB_BYPTR, const struct crush_loc *, cl_node)
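
/*
 * The DEFINE_RB_FUNCS2() invocation above generates the
 * __insert_crush_loc(), erase_crush_loc() and lookup_crush_loc()
 * helpers used below, keyed by struct crush_loc through
 * crush_loc_compare().
 */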
/*
 * Parses a set of <bucket type name>':'<bucket name> pairs separated
 * by '|', e.g. "rack:foo1|rack:foo2|datacenter:bar".
 *
 * Note that @crush_location is modified by strsep().
 */
int ceph_parse_crush_location(char *crush_location, struct rb_root *locs)
{
	struct crush_loc_node *loc;
	const char *type_name, *name, *colon;
	size_t type_name_len, name_len;

	dout("%s '%s'\n", __func__, crush_location);
	while ((type_name = strsep(&crush_location, "|"))) {
		colon = strchr(type_name, ':');
		if (!colon)
			return -EINVAL;

		type_name_len = colon - type_name;
		if (type_name_len == 0)
			return -EINVAL;

		name = colon + 1;
		name_len = strlen(name);
		if (name_len == 0)
			return -EINVAL;

		loc = alloc_crush_loc(type_name_len, name_len);
		if (!loc)
			return -ENOMEM;

		loc->cl_loc.cl_type_name = loc->cl_data;
		memcpy(loc->cl_loc.cl_type_name, type_name, type_name_len);
		loc->cl_loc.cl_type_name[type_name_len] = '\0';

		loc->cl_loc.cl_name = loc->cl_data + type_name_len + 1;
		memcpy(loc->cl_loc.cl_name, name, name_len);
		loc->cl_loc.cl_name[name_len] = '\0';

		if (!__insert_crush_loc(locs, loc)) {
			free_crush_loc(loc);
			return -EEXIST;
		}

		dout("%s type_name '%s' name '%s'\n", __func__,
		     loc->cl_loc.cl_type_name, loc->cl_loc.cl_name);
	}

	return 0;
}
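
/*
 * Usage sketch: the buffer must be writable because strsep() splits
 * it in place.
 *
 *	struct rb_root locs = RB_ROOT;
 *	char buf[] = "rack:foo1|rack:foo2|datacenter:bar";
 *	int ret = ceph_parse_crush_location(buf, &locs);
 *
 *	...
 *	ceph_clear_crush_locs(&locs);
 */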
int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2)
{
	struct rb_node *n1 = rb_first(locs1);
	struct rb_node *n2 = rb_first(locs2);
	int ret;

	for ( ; n1 && n2; n1 = rb_next(n1), n2 = rb_next(n2)) {
		struct crush_loc_node *loc1 =
		    rb_entry(n1, struct crush_loc_node, cl_node);
		struct crush_loc_node *loc2 =
		    rb_entry(n2, struct crush_loc_node, cl_node);

		ret = crush_loc_compare(&loc1->cl_loc, &loc2->cl_loc);
		if (ret)
			return ret;
	}

	if (!n1 && n2)
		return -1;
	if (n1 && !n2)
		return 1;
	return 0;
}
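
/*
 * Like strcmp(), the return value orders the two location sets; a
 * tree that is a strict prefix of the other sorts first via the
 * !n1/!n2 checks after the loop.
 */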
void ceph_clear_crush_locs(struct rb_root *locs)
{
	while (!RB_EMPTY_ROOT(locs)) {
		struct crush_loc_node *loc =
		    rb_entry(rb_first(locs), struct crush_loc_node, cl_node);

		erase_crush_loc(locs, loc);
		free_crush_loc(loc);
	}
}
/*
 * [a-zA-Z0-9-_.]+
 */
static bool is_valid_crush_name(const char *name)
{
	do {
		if (!('a' <= *name && *name <= 'z') &&
		    !('A' <= *name && *name <= 'Z') &&
		    !('0' <= *name && *name <= '9') &&
		    *name != '-' && *name != '_' && *name != '.')
			return false;
	} while (*++name != '\0');

	return true;
}
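
/*
 * CRUSH device classes create per-class shadow buckets whose names
 * embed a '~' (e.g. "default~hdd"), so rejecting characters outside
 * [a-zA-Z0-9-_.] is what lets get_immediate_parent() below skip the
 * shadow hierarchy.
 */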
/*
 * Gets the parent of an item.  Returns its id (<0 because the
 * parent is always a bucket), type id (>0 for the same reason,
 * via @parent_type_id) and location (via @parent_loc).  If no
 * parent, returns 0.
 *
 * Does a linear search, as there are no parent pointers of any
 * kind.  Note that the result is ambiguous for items that occur
 * multiple times in the map.
 */
static int get_immediate_parent(struct crush_map *c, int id,
				u16 *parent_type_id,
				struct crush_loc *parent_loc)
{
	struct crush_bucket *b;
	struct crush_name_node *type_cn, *cn;
	int i, j;

	for (i = 0; i < c->max_buckets; i++) {
		b = c->buckets[i];
		if (!b)
			continue;

		/* ignore per-class shadow hierarchy */
		cn = lookup_crush_name(&c->names, b->id);
		if (!cn || !is_valid_crush_name(cn->cn_name))
			continue;

		for (j = 0; j < b->size; j++) {
			if (b->items[j] != id)
				continue;

			*parent_type_id = b->type;
			type_cn = lookup_crush_name(&c->type_names, b->type);
			parent_loc->cl_type_name = type_cn->cn_name;
			parent_loc->cl_name = cn->cn_name;
			return b->id;
		}
	}

	return 0;  /* no parent */
}
/*
 * Calculates the locality/distance from an item to a client
 * location expressed in terms of CRUSH hierarchy as a set of
 * (bucket type name, bucket name) pairs.  Specifically, looks
 * for the lowest-valued bucket type for which the location of
 * @id matches one of the locations in @locs, so for standard
 * bucket types (host = 1, rack = 3, datacenter = 8, zone = 9)
 * a matching host is closer than a matching rack and a matching
 * data center is closer than a matching zone.
 *
 * Specifying multiple locations (a "multipath" location) such
 * as "rack=foo1 rack=foo2 datacenter=bar" is allowed -- @locs
 * is a multimap.  The locality will be:
 *
 * - 3 for OSDs in racks foo1 and foo2
 * - 8 for OSDs in data center bar
 * - -1 for all other OSDs
 *
 * The lowest possible bucket type is 1, so the best locality
 * for an OSD is 1 (i.e. a matching host).  Locality 0 would be
 * the OSD itself.
 */
int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
			    struct rb_root *locs)
{
	struct crush_loc loc;
	u16 type_id;

	/*
	 * Instead of repeated get_immediate_parent() calls,
	 * the location of @id could be obtained with a single
	 * depth-first traversal.
	 */
	for (;;) {
		id = get_immediate_parent(osdmap->crush, id, &type_id, &loc);
		if (id >= 0)
			return -1;  /* not local */

		if (lookup_crush_loc(locs, &loc))