#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>
char *ceph_osdmap_state_str(char *str, int len, int state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}
static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}
/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
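/*
 * Worked example of the mask math above: for pg_num = 12,
 * calc_bits_of(11) = 4, so pg_num_mask = (1 << 4) - 1 = 15 (0xf).
 * ceph_stable_mod() later folds a placement seed onto [0, pg_num)
 * using these masks, which is why each mask is the smallest 2^n-1
 * covering its value.
 */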
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;

	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
static int skip_name_map(void **p, void *end)
{
	int len;

	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		int strlen;

		*p += sizeof(u32);
		ceph_decode_32_safe(p, end, strlen, bad);
		*p += strlen;
	}
	return 0;
bad:
	return -EINVAL;
}
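/*
 * Layout note (as inferred from the skipping logic above): each name
 * map is a u32 entry count followed by (u32 id, length-prefixed
 * string) pairs.  crush_decode() below steps over three of these --
 * type names, bucket names and rule names -- without parsing them.
 */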
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;
	u32 num_name_maps;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				  (struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */
	for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
		err = skip_name_map(p, end);
		if (err < 0)
			goto done;
	}

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

done:
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}
/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	if (l.pool < r.pool)
		return -1;
	if (l.pool > r.pool)
		return 1;
	if (l.seed < r.seed)
		return -1;
	if (l.seed > r.seed)
		return 1;
	return 0;
}
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}
static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %lld.%x got %p\n",
			     pgid.pool, pgid.seed, pg);
			return pg;
		}
	}
	return NULL;
}
static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
		     pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
	return -ENOENT;
}
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}
struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}
static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p); /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;  /* snapid key */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		*p += 1;  /* skip min_size */

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}
/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);

	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->primary_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map->osd_primary_affinity);
	kfree(map);
}
/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	int i;

	state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	map->osd_state = state;

	weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
	if (!weight)
		return -ENOMEM;
	map->osd_weight = weight;

	addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
	if (!addr)
		return -ENOMEM;
	map->osd_addr = addr;

	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = krealloc(map->osd_primary_affinity,
				    max*sizeof(*affinity), GFP_NOFS);
		if (!affinity)
			return -ENOMEM;
		map->osd_primary_affinity = affinity;

		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}
#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1
/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}
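/*
 * Shape of the two encodings handled above: v7+ osdmaps wrap the
 * client-usable data in an outer (struct_v, struct_compat, u32
 * struct_len) header followed by a client_data section carrying its
 * own identical header, while pre-v7 maps begin directly with a u16
 * version -- hence the one-byte rewind before re-reading the version
 * in the legacy branch.
 */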
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}
static int __decode_pg_temp(void **p, void *end, struct ceph_osdmap *map,
			    bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg pgid;
		u32 len, i;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		ceph_decode_32_safe(p, end, len, e_inval);

		ret = __remove_pg_mapping(&map->pg_temp, pgid);
		BUG_ON(!incremental && ret != -ENOENT);

		if (!incremental || len > 0) {
			struct ceph_pg_mapping *pg;

			ceph_decode_need(p, end, len*sizeof(u32), e_inval);

			if (len > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
				return -EINVAL;

			pg = kzalloc(sizeof(*pg) + len*sizeof(u32), GFP_NOFS);
			if (!pg)
				return -ENOMEM;

			pg->pgid = pgid;
			pg->pg_temp.len = len;
			for (i = 0; i < len; i++)
				pg->pg_temp.osds[i] = ceph_decode_32(p);

			ret = __insert_pg_mapping(pg, &map->pg_temp);
			if (ret) {
				kfree(pg);
				return ret;
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}
static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pg_temp(p, end, map, false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pg_temp(p, end, map, true);
}
static int __decode_primary_temp(void **p, void *end, struct ceph_osdmap *map,
				 bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg pgid;
		u32 osd;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		ceph_decode_32_safe(p, end, osd, e_inval);

		ret = __remove_pg_mapping(&map->primary_temp, pgid);
		BUG_ON(!incremental && ret != -ENOENT);

		if (!incremental || osd != (u32)-1) {
			struct ceph_pg_mapping *pg;

			pg = kzalloc(sizeof(*pg), GFP_NOFS);
			if (!pg)
				return -ENOMEM;

			pg->pgid = pgid;
			pg->primary_temp.osd = osd;

			ret = __insert_pg_mapping(pg, &map->primary_temp);
			if (ret) {
				kfree(pg);
				return ret;
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}
static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_primary_temp(p, end, map, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return __decode_primary_temp(p, end, map, true);
}
u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}
static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
						    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}
static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}
/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), e_inval);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_state, map->max_osd);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		/* XXX can this happen? */
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	map->crush = crush_decode(*p, min(*p + len, end));
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}
	*p += len;

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}
/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	mutex_init(&map->crush_scratch_mutex);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}
/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end,
				      struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + sizeof(u8);
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d weight 0x%x %s\n", osd, w,
		     w == CEPH_OSD_IN ? "(in)" :
		     (w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u8 xorstate;
		int ret;

		osd = ceph_decode_32(p);
		xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			pr_info("osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			pr_info("osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		ceph_decode_copy(p, &addr, sizeof(addr));
		ceph_decode_addr(&addr);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush)) {
			err = PTR_ERR(newcrush);
			newcrush = NULL;
			goto bad;
		}
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * based on file layout.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 len,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu  osize %u fl_su %u\n", off, len,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object.  This is the minimum of the full length requested (len) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, len, su - su_offset);

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
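/*
 * Worked example of the striping math above, with illustrative
 * values: su = 65536, sc = 3, osize = 262144 gives su_per_object = 4.
 * For off = 200000: bl = 3, stripeno = 1, stripepos = 0, objsetno = 0,
 * so *ono = 0; su_offset = 200000 % 65536 = 3392 and *oxoff = 3392 +
 * (1 % 4) * 65536 = 68928.  Any len beyond su - su_offset = 62144 is
 * clipped to that remainder, per the single-su note above.
 */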
/*
 * Calculate mapping of a (oloc, oid) pair to a PG.  Should only be
 * called with target's (oloc, oid), since tiering isn't taken into
 * account.
 */
int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
			struct ceph_object_locator *oloc,
			struct ceph_object_id *oid,
			struct ceph_pg *pg_out)
{
	struct ceph_pg_pool_info *pi;

	pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool);
	if (!pi)
		return -EIO;

	pg_out->pool = oloc->pool;
	pg_out->seed = ceph_str_hash(pi->object_hash, oid->name,
				     oid->name_len);

	dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name,
	     pg_out->pool, pg_out->seed);
	return 0;
}
EXPORT_SYMBOL(ceph_oloc_oid_to_pg);
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max)
{
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	mutex_lock(&map->crush_scratch_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_scratch_ary);
	mutex_unlock(&map->crush_scratch_mutex);

	return r;
}
/*
 * Calculate raw (crush) set for given pgid.
 *
 * Return raw set length, or error.
 */
static int pg_to_raw_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pool,
			  struct ceph_pg pgid, u32 pps, int *osds)
{
	int ruleno;
	int len;

	/* crush */
	ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
				 pool->type, pool->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pgid.pool, pool->crush_ruleset, pool->type,
		       pool->size);
		return -ENOENT;
	}

	len = do_crush(osdmap, ruleno, pps, osds,
		       min_t(int, pool->size, CEPH_PG_MAX_SIZE),
		       osdmap->osd_weight, osdmap->max_osd);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pgid.pool, pool->crush_ruleset,
		       pool->type, pool->size);
		return len;
	}

	return len;
}
/*
 * Given raw set, calculate up set and up primary.
 *
 * Return up set length.  *primary is set to up primary osd id, or -1
 * if up set is empty.
 */
static int raw_to_up_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pool,
			  int *osds, int len, int *primary)
{
	int up_primary = -1;
	int i;

	if (ceph_can_shift_osds(pool)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < len; i++) {
			if (ceph_osd_is_down(osdmap, osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				osds[i - removed] = osds[i];
		}

		len -= removed;
		if (len > 0)
			up_primary = osds[0];
	} else {
		/* set dne devices to NONE */
		for (i = len - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, osds[i]))
				osds[i] = CRUSH_ITEM_NONE;
			else
				up_primary = osds[i];
		}
	}

	*primary = up_primary;
	return len;
}
static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
				   struct ceph_pg_pool_info *pool,
				   int *osds, int len, int *primary)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < len; i++) {
		int osd = osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == len)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < len; i++) {
		int osd = osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	*primary = osds[pos];

	if (ceph_can_shift_osds(pool) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			osds[i] = osds[i - 1];
		osds[0] = *primary;
	}
}
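/*
 * Note on the rejection test above: affinity values are fractions of
 * CEPH_OSD_MAX_PRIMARY_AFFINITY (0x10000), and the hash is shifted
 * down to a 16-bit value, so an osd with aff = 0x8000 is expected to
 * keep roughly half of its pgs as primary and defer the rest to the
 * fallback logic.
 */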
/*
 * Given up set, apply pg_temp and primary_temp mappings.
 *
 * Return acting set length.  *primary is set to acting primary osd id,
 * or -1 if acting set is empty.
 */
static int apply_temps(struct ceph_osdmap *osdmap,
		       struct ceph_pg_pool_info *pool, struct ceph_pg pgid,
		       int *osds, int len, int *primary)
{
	struct ceph_pg_mapping *pg;
	int temp_len;
	int temp_primary;
	int i;

	/* raw_pg -> pg */
	pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
				    pool->pg_num_mask);

	/* pg_temp? */
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		temp_len = 0;
		temp_primary = -1;

		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pool))
					continue;
				else
					osds[temp_len++] = CRUSH_ITEM_NONE;
			} else {
				osds[temp_len++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp_len; i++) {
			if (osds[i] != CRUSH_ITEM_NONE) {
				temp_primary = osds[i];
				break;
			}
		}
	} else {
		temp_len = len;
		temp_primary = *primary;
	}

	/* primary_temp? */
	pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp_primary = pg->primary_temp.osd;

	*primary = temp_primary;
	return temp_len;
}
/*
 * Calculate acting set for given pgid.
 *
 * Return acting set length, or error.  *primary is set to acting
 * primary osd id, or -1 if acting set is empty or on error.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *primary)
{
	struct ceph_pg_pool_info *pool;
	u32 pps;
	int len;

	pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
	if (!pool) {
		*primary = -1;
		return -ENOENT;
	}

	if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
				     ceph_stable_mod(pgid.seed, pool->pgp_num,
						     pool->pgp_num_mask),
				     pgid.pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
				      pool->pgp_num_mask) +
			(unsigned)pgid.pool;
	}

	len = pg_to_raw_osds(osdmap, pool, pgid, pps, osds);
	if (len < 0) {
		*primary = -1;
		return len;
	}

	len = raw_to_up_osds(osdmap, pool, osds, len, primary);

	apply_primary_affinity(osdmap, pps, pool, osds, len, primary);

	len = apply_temps(osdmap, pool, pgid, osds, len, primary);

	return len;
}
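/*
 * Minimal usage sketch for ceph_calc_pg_acting() (mirroring
 * ceph_calc_pg_primary() below, the simplest in-tree caller):
 *
 *	int osds[CEPH_PG_MAX_SIZE];
 *	int primary;
 *	int len = ceph_calc_pg_acting(osdmap, pgid, osds, &primary);
 *
 * len < 0 on error; otherwise osds[0..len) is the acting set and
 * primary is the acting primary osd id, or -1 if the set is empty.
 */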
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int osds[CEPH_PG_MAX_SIZE];
	int primary;

	ceph_calc_pg_acting(osdmap, pgid, osds, &primary);

	return primary;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);