#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

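/*
 * Worked example: calc_bits_of(11) shifts 1011b -> 101b -> 10b -> 1b
 * -> 0 and returns 4, i.e. the number of significant bits in the
 * value.
 */
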
/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}

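/*
 * For example, a pool with pg_num == 12 gets pg_num_mask ==
 * (1 << calc_bits_of(11)) - 1 == 0xf: the smallest 2^n-1 that is
 * >= 12, as the comment above describes.  ceph_stable_mod() later
 * uses this mask to fold a raw placement seed into [0, pg_num).
 */
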
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;

	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int skip_name_map(void **p, void *end)
{
	int len;

	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		int strlen;

		*p += sizeof(u32);
		ceph_decode_32_safe(p, end, strlen, bad);
		*p += strlen;
	}
	return 0;
bad:
	return -EINVAL;
}

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;
	u32 num_name_maps;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				(struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */
	for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
		err = skip_name_map(p, end);
		if (err < 0)
			goto done;
	}

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

done:
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}

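/*
 * Rough sketch of the on-wire layout consumed above (derived from the
 * decode sequence, not an authoritative format spec): CRUSH_MAGIC,
 * max_buckets, max_rules, max_devices; then per bucket an alg id, a
 * common header (id, type, alg, hash, weight, size), the item vector
 * and an alg-specific payload; then per rule a length, a 4-byte mask
 * and the (op, arg1, arg2) steps; then three skippable name maps and
 * optional trailing tunables, each guarded by a ceph_decode_need()
 * that bails out to "done" rather than failing.
 */
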
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = ceph_pg_compare(&new->pgid, &pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = ceph_pg_compare(&pgid, &pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %lld.%x got %p\n",
			     pgid.pool, pgid.seed, pg);
			return pg;
		}
	}
	return NULL;
}

static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
		     pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
	return -ENOENT;
}

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p);  /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;      /* snapid key */
		*p += 1 + 1;  /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}

static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	mutex_init(&map->crush_scratch_mutex);

	return map;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->primary_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map->osd_primary_affinity);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	int i;

	state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	map->osd_state = state;

	weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
	if (!weight)
		return -ENOMEM;
	map->osd_weight = weight;

	addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
	if (!addr)
		return -ENOMEM;
	map->osd_addr = addr;

	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = krealloc(map->osd_primary_affinity,
				    max*sizeof(*affinity), GFP_NOFS);
		if (!affinity)
			return -ENOMEM;
		map->osd_primary_affinity = affinity;

		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}

#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}

static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

static int __decode_pg_temp(void **p, void *end, struct ceph_osdmap *map,
			    bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg pgid;
		u32 len, i;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		ceph_decode_32_safe(p, end, len, e_inval);

		ret = __remove_pg_mapping(&map->pg_temp, pgid);
		BUG_ON(!incremental && ret != -ENOENT);

		if (!incremental || len > 0) {
			struct ceph_pg_mapping *pg;

			ceph_decode_need(p, end, len*sizeof(u32), e_inval);

			if (len > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
				return -EINVAL;

			pg = kzalloc(sizeof(*pg) + len*sizeof(u32), GFP_NOFS);
			if (!pg)
				return -ENOMEM;

			pg->pgid = pgid;
			pg->pg_temp.len = len;
			for (i = 0; i < len; i++)
				pg->pg_temp.osds[i] = ceph_decode_32(p);

			ret = __insert_pg_mapping(pg, &map->pg_temp);
			if (ret) {
				kfree(pg);
				return ret;
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pg_temp(p, end, map, false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pg_temp(p, end, map, true);
}

static int __decode_primary_temp(void **p, void *end, struct ceph_osdmap *map,
				 bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg pgid;
		u32 osd;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		ceph_decode_32_safe(p, end, osd, e_inval);

		ret = __remove_pg_mapping(&map->primary_temp, pgid);
		BUG_ON(!incremental && ret != -ENOENT);

		if (!incremental || osd != (u32)-1) {
			struct ceph_pg_mapping *pg;

			pg = kzalloc(sizeof(*pg), GFP_NOFS);
			if (!pg)
				return -ENOMEM;

			pg->pgid = pgid;
			pg->primary_temp.osd = osd;

			ret = __insert_pg_mapping(pg, &map->primary_temp);
			if (ret) {
				kfree(pg);
				return ret;
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_primary_temp(p, end, map, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return __decode_primary_temp(p, end, map, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
						    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), e_inval);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_state, map->max_osd);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		/* XXX can this happen? */
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	map->crush = crush_decode(*p, min(*p + len, end));
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}
	*p += len;

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush)) {
			err = PTR_ERR(newcrush);
			newcrush = NULL;
			goto bad;
		}
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
		map->osd_addr[osd] = addr;
	}

	/* new_state */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd;
		u8 xorstate;
		ceph_decode_32_safe(p, end, osd, e_inval);
		xorstate = **(u8 **)p;
		(*p)++;  /* clean flag */
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		if (xorstate & CEPH_OSD_UP)
			pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] ^= xorstate;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
		     off == CEPH_OSD_IN ? "(in)" :
		     (off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	WARN_ON(!ceph_oid_empty(dest));

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	}

	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);

static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}

/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);

static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}

/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);

void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);

static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}

static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}

static bool osds_valid(const struct ceph_osds *set)
{
	/* non-empty set */
	if (set->size > 0 && set->primary >= 0)
		return true;

	/* empty can_shift_osds set */
	if (!set->size && set->primary == -1)
		return true;

	/* empty !can_shift_osds set - all NONE */
	if (set->size > 0 && set->primary == -1) {
		int i;

		for (i = 0; i < set->size; i++) {
			if (set->osds[i] != CRUSH_ITEM_NONE)
				return false;
		}
		return true;
	}

	return false;
}

*dest
, const struct ceph_osds
*src
)
1590 memcpy(dest
->osds
, src
->osds
, src
->size
* sizeof(src
->osds
[0]));
1591 dest
->size
= src
->size
;
1592 dest
->primary
= src
->primary
;
static bool is_split(const struct ceph_pg *pgid,
		     u32 old_pg_num,
		     u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}

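/*
 * Worked example (illustrative values): growing a pool from
 * old_pg_num == 4 to new_pg_num == 8 splits pg seed 1.  old_bits ==
 * calc_bits_of(4) == 3, old_mask == 7; for n == 1, next_bit == 4 and
 * s == 5, and ceph_stable_mod(5, 4, 7) == 1 == pgid->seed, so PG 5 is
 * a child of PG 1 and is_split() returns true.
 */
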
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  const struct ceph_pg *pgid)
{
	return !osds_equal(old_acting, new_acting) ||
	       !osds_equal(old_up, new_up) ||
	       old_size != new_size ||
	       old_min_size != new_min_size ||
	       is_split(pgid, old_pg_num, new_pg_num) ||
	       old_sort_bitwise != new_sort_bitwise;
}

static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
	int i;

	for (i = 0; i < acting->size; i++) {
		if (acting->osds[i] == osd)
			return i;
	}

	return -1;
}

static bool primary_changed(const struct ceph_osds *old_acting,
			    const struct ceph_osds *new_acting)
{
	if (!old_acting->size && !new_acting->size)
		return false; /* both still empty */

	if (!old_acting->size ^ !new_acting->size)
		return true; /* was empty, now not, or vice versa */

	if (old_acting->primary != new_acting->primary)
		return true; /* primary changed */

	if (calc_pg_rank(old_acting->primary, old_acting) !=
	    calc_pg_rank(new_acting->primary, new_acting))
		return true;

	return false; /* same primary (tho replicas may have changed) */
}

bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 len,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (len) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, len, su - su_offset);

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);

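/*
 * Worked example (illustrative layout): su == 64K, sc == 2,
 * osize == 256K, so su_per_object == 4.  For off == 320K, len == 32K:
 * bl == 5, stripeno == 2, stripepos == 1, objsetno == 0, hence
 * *ono == 1, *oxoff == (2 % 4) * 64K == 128K and *oxlen == 32K
 * (the whole request fits in one stripe unit).
 */
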
/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      struct ceph_object_id *oid,
			      struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	raw_pgid->pool = oloc->pool;
	raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
				       oid->name_len);

	dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
	     raw_pgid->pool, raw_pgid->seed);
	return 0;
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);

/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}

/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}

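/*
 * Concrete illustration of the legacy overlap (made-up numbers): with
 * pgp_num == 8 (mask 7), pool 0 seed 5 yields 5 + 0 == 5 and pool 1
 * seed 4 yields 4 + 1 == 5, i.e. 0.5 and 1.4 land on the same
 * placement seed.  The HASHPSPOOL branch avoids this by hashing the
 * (seed, pool) pair instead of adding them.
 */
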
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max)
{
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	mutex_lock(&map->crush_scratch_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_scratch_ary);
	mutex_unlock(&map->crush_scratch_mutex);

	return r;
}

/*
 * Calculate raw set (CRUSH output) for given PG.  The result may
 * contain nonexistent OSDs.  ->primary is undefined for a raw set.
 *
 * Placement seed (CRUSH input) is returned through @ppps.
 */
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   const struct ceph_pg *raw_pgid,
			   struct ceph_osds *raw,
			   u32 *ppps)
{
	u32 pps = raw_pg_to_pps(pi, raw_pgid);
	int ruleno;
	int len;

	ceph_osds_init(raw);
	if (ppps)
		*ppps = pps;

	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
				 pi->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size);
		return;
	}

	len = do_crush(osdmap, ruleno, pps, raw->osds,
		       min_t(int, pi->size, ARRAY_SIZE(raw->osds)),
		       osdmap->osd_weight, osdmap->max_osd);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
		       pi->size);
		return;
	}

	raw->size = len;
}

/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/dne devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}

static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}

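/*
 * Numeric example (illustrative): with aff == 0x8000, i.e. half of
 * CEPH_OSD_MAX_PRIMARY_AFFINITY (0x10000), the 16-bit value
 * crush_hash32_2(...) >> 16 is >= aff for about half of all pps
 * inputs, so the osd is skipped as primary for roughly half of its
 * PGs - the "proportional fraction" described above.
 */
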
/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *raw_pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg pgid;
	struct ceph_pg_mapping *pg;
	int i;

	raw_pg_to_pg(pi, raw_pgid, &pgid);
	ceph_osds_init(temp);

	/* pg_temp? */
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}

/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg_pool_info *pi;
	u32 pps;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi) {
		ceph_osds_init(up);
		ceph_osds_init(acting);
		goto out;
	}

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, raw_pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
out:
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}

/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_osds up, acting;

	ceph_pg_to_up_acting_osds(osdmap, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);

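/*
 * Putting it together - a minimal lookup sketch (illustrative only;
 * osdmap, oid and oloc are assumed to be set up by the caller):
 *
 *	struct ceph_pg raw_pgid;
 *	int osd = -1;
 *
 *	if (ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid) == 0)
 *		osd = ceph_pg_to_acting_primary(osdmap, &raw_pgid);
 *
 * On success osd holds the acting primary for the object, or -1 if
 * the PG currently has no acting primary.
 */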