// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
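
/*
 * Worked example (illustrative, not in the original source): pg_num = 12
 * gives calc_bits_of(11) = 4 and pg_num_mask = (1 << 4) - 1 = 15, i.e.
 * the mask covers seeds 0..15, a superset of the valid range 0..11.
 * ceph_stable_mod() relies on this later to fold a raw seed into
 * 0..pg_num-1.
 */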
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;
	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
		node);

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
		    rb_entry(rb_first(&c->choose_args),
			     struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}

static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}
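
/*
 * Note (illustrative): an encoded length of 0 is valid here -- the
 * function then returns NULL with *plen = 0, which callers such as
 * decode_choose_arg() must treat as an empty array rather than an
 * error.  Real failures are reported via ERR_PTR(), hence the IS_ERR()
 * checks at the call sites.
 */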
/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;

			if (arg->ids_size &&
			    arg->ids_size != c->buckets[bucket_index]->size)
				goto e_inval;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}

static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}
		/* Every bucket has a permutation array. */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}
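
/*
 * Example (illustrative): a map with max_buckets = 4 and a single
 * 3-item bucket ends up with working_size = sizeof(struct crush_work)
 * + 4 pointers + one struct crush_work_bucket + 3 * sizeof(__u32) for
 * that bucket's permutation array.  do_crush() later hands this
 * preallocated scratch space to crush_do_rule(), so mapping a pg never
 * allocates.
 */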
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				(struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto fail;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto fail;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
fail:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);

bad:
	err = -EINVAL;
	goto fail;
}
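
/*
 * Note on the labels above: "bad" decode failures map to -EINVAL,
 * "badmem" to -ENOMEM, and both funnel through "fail", which destroys
 * the partially built map.  The tunables section instead jumps to
 * "done" when the buffer runs out, because older encodings
 * legitimately end early -- missing tunables simply keep the defaults
 * set at the top of the function.
 */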
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}

int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}

static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p); /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;     /* snapid key */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;
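	/*
	 * Example (illustrative): the else branch above rounds up, so a
	 * 3-replica pool defaults to min_size 2 and a 2-replica pool to
	 * min_size 1.
	 */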
	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4; /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8; /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4; /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4; /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1; /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1; /* skip fast_read */

	if (ev >= 23) {
		*p += 4; /* skip hit_set_grade_decay_rate */
		*p += 4; /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;
	mutex_init(&map->crush_workspace_mutex);

	return map;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap_items),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map->osd_primary_affinity);
	kfree(map->crush_workspace);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	int i;

	state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	map->osd_state = state;

	weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
	if (!weight)
		return -ENOMEM;
	map->osd_weight = weight;

	addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
	if (!addr)
		return -ENOMEM;
	map->osd_addr = addr;

	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = krealloc(map->osd_primary_affinity,
				    max*sizeof(*affinity), GFP_NOFS);
		if (!affinity)
			return -ENOMEM;
		map->osd_primary_affinity = affinity;

		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}
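
/*
 * Note (illustrative): krealloc() preserves existing entries, and the
 * loops above only initialize slots from the old max_osd up to the new
 * one, so growing the map leaves previously decoded state intact.  An
 * allocation failure partway through leaves the already reallocated
 * arrays owned by the map, so ceph_osdmap_destroy() can still free
 * them.
 */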
static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	void *workspace;
	size_t work_size;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);
	workspace = kmalloc(work_size, GFP_NOIO);
	if (!workspace) {
		crush_destroy(crush);
		return -ENOMEM;
	}
	crush_init_workspace(crush, workspace);

	if (map->crush)
		crush_destroy(map->crush);
	kfree(map->crush_workspace);
	map->crush = crush;
	map->crush_workspace = workspace;
	return 0;
}

#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}
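
/*
 * Note (illustrative): the "*p -= 1" above is not an error.  Pre-v7
 * maps start with a 16-bit version where v7+ maps carry an 8-bit
 * struct_v, so after peeking at the first byte the pointer is backed
 * up and the same bytes are re-read as a le16 version.
 */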
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}

static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						     bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kmalloc_array(map->max_osd,
							  sizeof(u32),
							  GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}

static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}

static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
						       bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(2 * len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}

/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*((struct_v >= 5 ? sizeof(u32) :
						       sizeof(u8)) +
				       sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), e_inval);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;
	*p += len;

	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}
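
/*
 * Typical usage (a sketch, not code from this file): the client's map
 * handler calls ceph_osdmap_decode() on a full map message, swaps the
 * result in, and releases the old map with ceph_osdmap_destroy();
 * incremental updates go through osdmap_apply_incremental() below
 * instead.
 */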
/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
				      struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d weight 0x%x %s\n", osd, w,
		     w == CEPH_OSD_IN ? "(in)" :
		     (w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u32 xorstate;
		int ret;

		osd = ceph_decode_32(p);
		if (struct_v >= 5)
			xorstate = ceph_decode_32(p);
		else
			xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			pr_info("osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			pr_info("osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		ceph_decode_copy(p, &addr, sizeof(addr));
		ceph_decode_addr(&addr);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map)
{
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		err = osdmap_set_crush(map,
				       crush_decode(*p, min(*p + len, end)));
		if (err)
			goto bad;
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, struct_v, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	if (struct_v >= 3) {
		/* new_erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
		/* old_erasure_code_profiles */
		ceph_decode_skip_set(p, end, string, e_inval);
	}

	if (struct_v >= 4) {
		err = decode_new_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_new_pg_upmap_items(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return ERR_PTR(err);
}

void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src)
{
	ceph_oloc_destroy(dest);

	dest->pool = src->pool;
	if (src->pool_ns)
		dest->pool_ns = ceph_get_string(src->pool_ns);
	else
		dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);

void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
	ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	ceph_oid_destroy(dest);

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	} else {
		dest->name = dest->inline_name;
	}
	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);

static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}

/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);

static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}

/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);

void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);

/*
 * osds only
 */
static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}

/*
 * osds + primary
 */
static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}

static bool osds_valid(const struct ceph_osds *set)
{
	/* non-empty set */
	if (set->size > 0 && set->primary >= 0)
		return true;

	/* empty can_shift_osds set */
	if (!set->size && set->primary == -1)
		return true;

	/* empty !can_shift_osds set - all NONE */
	if (set->size > 0 && set->primary == -1) {
		int i;

		for (i = 0; i < set->size; i++) {
			if (set->osds[i] != CRUSH_ITEM_NONE)
				break;
		}
		if (i == set->size)
			return true;
	}

	return false;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
	dest->size = src->size;
	dest->primary = src->primary;
}

bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
		      u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}
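
/*
 * Worked example (illustrative): growing a pool from old_pg_num = 4 to
 * new_pg_num = 8 splits pg seed 1: old_bits = 3, so n = 1 gives
 * s = 4 | 1 = 5, which is a valid new pg (5 < 8) and folds back to
 * seed 1 via ceph_stable_mod(5, 4, 7) -- so ceph_pg_is_split() returns
 * true.
 */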
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  bool old_recovery_deletes,
			  bool new_recovery_deletes,
			  const struct ceph_pg *pgid)
{
	return !osds_equal(old_acting, new_acting) ||
	       !osds_equal(old_up, new_up) ||
	       old_size != new_size ||
	       old_min_size != new_min_size ||
	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
	       old_sort_bitwise != new_sort_bitwise ||
	       old_recovery_deletes != new_recovery_deletes;
}

static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
	int i;

	for (i = 0; i < acting->size; i++) {
		if (acting->osds[i] == osd)
			return i;
	}

	return -1;
}

static bool primary_changed(const struct ceph_osds *old_acting,
			    const struct ceph_osds *new_acting)
{
	if (!old_acting->size && !new_acting->size)
		return false; /* both still empty */

	if (!old_acting->size ^ !new_acting->size)
		return true; /* was empty, now not, or vice versa */

	if (old_acting->primary != new_acting->primary)
		return true; /* primary changed */

	if (calc_pg_rank(old_acting->primary, old_acting) !=
	    calc_pg_rank(new_acting->primary, new_acting))
		return true;

	return false; /* same primary (tho replicas may have changed) */
}

bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}

/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				 const struct ceph_object_id *oid,
				 const struct ceph_object_locator *oloc,
				 struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf))
			buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL);
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
}

int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      const struct ceph_object_id *oid,
			      const struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	__ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
	return 0;
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);

/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}
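
/*
 * Example (illustrative): with pg_num = 12 (pg_num_mask = 15), a raw
 * seed of 13 masks to 13, which is >= 12, so ceph_stable_mod() drops a
 * bit and yields pg seed 5 (13 & 7); a raw seed of 9 stays 9.  Every
 * result lands in 0..pg_num-1 without reshuffling existing pgs the way
 * a plain modulo would when pg_num changes.
 */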
/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}

/*
 * Magic value used for a "default" fallback choose_args, used if the
 * crush_choose_arg_map passed to do_crush() does not exist.  If this
 * also doesn't exist, fall back to canonical weights.
 */
#define CEPH_DEFAULT_CHOOSE_ARGS	-1

static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    s64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);
	if (!arg_map)
		arg_map = lookup_choose_arg_map(&map->crush->choose_args,
						CEPH_DEFAULT_CHOOSE_ARGS);

	mutex_lock(&map->crush_workspace_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_workspace,
			  arg_map ? arg_map->args : NULL);
	mutex_unlock(&map->crush_workspace_mutex);

	return r;
}
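
/*
 * Note (illustrative): the workspace handed to crush_do_rule() is the
 * single preallocated map->crush_workspace buffer, sized in
 * osdmap_set_crush(), which is why the computation is serialized by
 * crush_workspace_mutex rather than allocating scratch space per call.
 */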
2271 static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
2272 struct ceph_pg_pool_info *pi,
2273 struct ceph_osds *set)
2275 int i;
2277 if (ceph_can_shift_osds(pi)) {
2278 int removed = 0;
2280 /* shift left */
2281 for (i = 0; i < set->size; i++) {
2282 if (!ceph_osd_exists(osdmap, set->osds[i])) {
2283 removed++;
2284 continue;
2286 if (removed)
2287 set->osds[i - removed] = set->osds[i];
2289 set->size -= removed;
2290 } else {
2291 /* set dne devices to NONE */
2292 for (i = 0; i < set->size; i++) {
2293 if (!ceph_osd_exists(osdmap, set->osds[i]))
2294 set->osds[i] = CRUSH_ITEM_NONE;
2300 * Calculate raw set (CRUSH output) for given PG and filter out
2301 * nonexistent OSDs. ->primary is undefined for a raw set.
2303 * Placement seed (CRUSH input) is returned through @ppps.
2305 static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
2306 struct ceph_pg_pool_info *pi,
2307 const struct ceph_pg *raw_pgid,
2308 struct ceph_osds *raw,
2309 u32 *ppps)
2311 u32 pps = raw_pg_to_pps(pi, raw_pgid);
2312 int ruleno;
2313 int len;
2315 ceph_osds_init(raw);
2316 if (ppps)
2317 *ppps = pps;
2319 ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
2320 pi->size);
2321 if (ruleno < 0) {
2322 pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
2323 pi->id, pi->crush_ruleset, pi->type, pi->size);
2324 return;
2327 if (pi->size > ARRAY_SIZE(raw->osds)) {
2328 pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
2329 pi->id, pi->crush_ruleset, pi->type, pi->size,
2330 ARRAY_SIZE(raw->osds));
2331 return;
2334 len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
2335 osdmap->osd_weight, osdmap->max_osd, pi->id);
2336 if (len < 0) {
2337 pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
2338 len, ruleno, pi->id, pi->crush_ruleset, pi->type,
2339 pi->size);
2340 return;
2343 raw->size = len;
2344 remove_nonexistent_osds(osdmap, pi, raw);
/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
                        const struct ceph_pg *pgid,
                        struct ceph_osds *raw)
{
        struct ceph_pg_mapping *pg;
        int i, j;

        pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
        if (pg) {
                /* make sure targets aren't marked out */
                for (i = 0; i < pg->pg_upmap.len; i++) {
                        int osd = pg->pg_upmap.osds[i];

                        if (osd != CRUSH_ITEM_NONE &&
                            osd < osdmap->max_osd &&
                            osdmap->osd_weight[osd] == 0) {
                                /* reject/ignore explicit mapping */
                                return;
                        }
                }
                for (i = 0; i < pg->pg_upmap.len; i++)
                        raw->osds[i] = pg->pg_upmap.osds[i];
                raw->size = pg->pg_upmap.len;
                /* check and apply pg_upmap_items, if any */
        }

        pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
        if (pg) {
                /*
                 * Note: this approach does not allow a bidirectional swap,
                 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
                 */
                for (i = 0; i < pg->pg_upmap_items.len; i++) {
                        int from = pg->pg_upmap_items.from_to[i][0];
                        int to = pg->pg_upmap_items.from_to[i][1];
                        int pos = -1;
                        bool exists = false;

                        /* make sure replacement doesn't already appear */
                        for (j = 0; j < raw->size; j++) {
                                int osd = raw->osds[j];

                                if (osd == to) {
                                        exists = true;
                                        break;
                                }

                                /* ignore mapping if target is marked out */
                                if (osd == from && pos < 0 &&
                                    !(to != CRUSH_ITEM_NONE &&
                                      to < osdmap->max_osd &&
                                      osdmap->osd_weight[to] == 0)) {
                                        pos = j;
                                }
                        }

                        if (!exists && pos >= 0)
                                raw->osds[pos] = to;
                }
        }
}
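/*
 * Worked example for the pg_upmap_items branch (arbitrary values):
 * item [3, 7] applied to a raw set [3, 5, 9] first scans for an
 * existing 7 (none), records pos 0 for the 3, and rewrites the set to
 * [7, 5, 9].  Had 7 already been present in the set, or been marked
 * out (weight 0), the item would have been skipped and the raw set
 * left untouched.
 */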
/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
                           struct ceph_pg_pool_info *pi,
                           struct ceph_osds *set)
{
        int i;

        /* ->primary is undefined for a raw set */
        BUG_ON(set->primary != -1);

        if (ceph_can_shift_osds(pi)) {
                int removed = 0;

                /* shift left */
                for (i = 0; i < set->size; i++) {
                        if (ceph_osd_is_down(osdmap, set->osds[i])) {
                                removed++;
                                continue;
                        }
                        if (removed)
                                set->osds[i - removed] = set->osds[i];
                }
                set->size -= removed;
                if (set->size > 0)
                        set->primary = set->osds[0];
        } else {
                /* set down/dne devices to NONE */
                for (i = set->size - 1; i >= 0; i--) {
                        if (ceph_osd_is_down(osdmap, set->osds[i]))
                                set->osds[i] = CRUSH_ITEM_NONE;
                        else
                                set->primary = set->osds[i];
                }
        }
}
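/*
 * Primary selection above follows the pool type: for shiftable
 * (replicated) pools the first surviving OSD becomes the primary,
 * while the backwards walk in the erasure-coded branch leaves
 * ->primary pointing at the live OSD in the lowest-numbered slot.
 */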
static void apply_primary_affinity(struct ceph_osdmap *osdmap,
                                   struct ceph_pg_pool_info *pi,
                                   u32 pps,
                                   struct ceph_osds *up)
{
        int i;
        int pos = -1;

        /*
         * Do we have any non-default primary_affinity values for these
         * osds?
         */
        if (!osdmap->osd_primary_affinity)
                return;

        for (i = 0; i < up->size; i++) {
                int osd = up->osds[i];

                if (osd != CRUSH_ITEM_NONE &&
                    osdmap->osd_primary_affinity[osd] !=
                                        CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
                        break;
                }
        }
        if (i == up->size)
                return;

        /*
         * Pick the primary.  Feed both the seed (for the pg) and the
         * osd into the hash/rng so that a proportional fraction of an
         * osd's pgs get rejected as primary.
         */
        for (i = 0; i < up->size; i++) {
                int osd = up->osds[i];
                u32 aff;

                if (osd == CRUSH_ITEM_NONE)
                        continue;

                aff = osdmap->osd_primary_affinity[osd];
                if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
                    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
                                    pps, osd) >> 16) >= aff) {
                        /*
                         * We chose not to use this primary.  Note it
                         * anyway as a fallback in case we don't pick
                         * anyone else, but keep looking.
                         */
                        if (pos < 0)
                                pos = i;
                } else {
                        pos = i;
                        break;
                }
        }
        if (pos < 0)
                return;

        up->primary = up->osds[pos];

        if (ceph_can_shift_osds(pi) && pos > 0) {
                /* move the new primary to the front */
                for (i = pos; i > 0; i--)
                        up->osds[i] = up->osds[i - 1];
                up->osds[0] = up->primary;
        }
}
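/*
 * Affinity is a 16-bit fixed-point fraction of
 * CEPH_OSD_MAX_PRIMARY_AFFINITY (0x10000 == 1.0), and the hash is
 * shifted down to the same [0, 0xffff] range.  For example, an OSD
 * with affinity 0x8000 (0.5) fails the >= test for roughly half of
 * its pps values and is therefore skipped as primary for about half
 * of its PGs, deferring to the next OSD in the up set.
 */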
/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
                          struct ceph_pg_pool_info *pi,
                          const struct ceph_pg *pgid,
                          struct ceph_osds *temp)
{
        struct ceph_pg_mapping *pg;
        int i;

        ceph_osds_init(temp);

        /* pg_temp? */
        pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
                for (i = 0; i < pg->pg_temp.len; i++) {
                        if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
                                if (ceph_can_shift_osds(pi))
                                        continue;

                                temp->osds[temp->size++] = CRUSH_ITEM_NONE;
                        } else {
                                temp->osds[temp->size++] = pg->pg_temp.osds[i];
                        }
                }

                /* apply pg_temp's primary */
                for (i = 0; i < temp->size; i++) {
                        if (temp->osds[i] != CRUSH_ITEM_NONE) {
                                temp->primary = temp->osds[i];
                                break;
                        }
                }
        }

        /* primary_temp? */
        pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
        if (pg)
                temp->primary = pg->primary_temp.osd;
}
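/*
 * pg_temp and primary_temp are temporary overrides, typically in place
 * while a PG is being backfilled or recovered: pg_temp swaps in a whole
 * interim acting set, primary_temp just an interim primary.  If both
 * exist, the primary derived from pg_temp above is overwritten by
 * primary_temp.
 */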
/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
                               struct ceph_pg_pool_info *pi,
                               const struct ceph_pg *raw_pgid,
                               struct ceph_osds *up,
                               struct ceph_osds *acting)
{
        struct ceph_pg pgid;
        u32 pps;

        WARN_ON(pi->id != raw_pgid->pool);
        raw_pg_to_pg(pi, raw_pgid, &pgid);

        pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
        apply_upmap(osdmap, &pgid, up);
        raw_to_up_osds(osdmap, pi, up);
        apply_primary_affinity(osdmap, pi, pps, up);
        get_temp_osds(osdmap, pi, &pgid, acting);
        if (!acting->size) {
                memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
                acting->size = up->size;
                if (acting->primary == -1)
                        acting->primary = up->primary;
        }

        WARN_ON(!osds_valid(up) || !osds_valid(acting));
}
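/*
 * The sequence above is the full mapping pipeline: CRUSH produces the
 * raw set, pg_upmap[_items] overrides are applied, down and nonexistent
 * OSDs are folded out to form the up set, primary affinity may demote
 * the default primary, and finally pg_temp/primary_temp yield the
 * acting set.  With no temp mappings the acting set is simply a copy
 * of the up set.
 */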
bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
                              struct ceph_pg_pool_info *pi,
                              const struct ceph_pg *raw_pgid,
                              struct ceph_spg *spgid)
{
        struct ceph_pg pgid;
        struct ceph_osds up, acting;
        int i;

        WARN_ON(pi->id != raw_pgid->pool);
        raw_pg_to_pg(pi, raw_pgid, &pgid);

        if (ceph_can_shift_osds(pi)) {
                spgid->pgid = pgid; /* struct */
                spgid->shard = CEPH_SPG_NOSHARD;
                return true;
        }

        ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
        for (i = 0; i < acting.size; i++) {
                if (acting.osds[i] == acting.primary) {
                        spgid->pgid = pgid; /* struct */
                        spgid->shard = i;
                        return true;
                }
        }

        return false;
}
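/*
 * For replicated pools the shard id is meaningless, hence
 * CEPH_SPG_NOSHARD.  For erasure-coded pools the shard is the
 * primary's position within the acting set, so the lookup fails
 * (returns false) when the acting set is empty or the primary cannot
 * be found in it.
 */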
/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
                              const struct ceph_pg *raw_pgid)
{
        struct ceph_pg_pool_info *pi;
        struct ceph_osds up, acting;

        pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
        if (!pi)
                return -1;

        ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
        return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);