#include "ceph_debug.h"

#include <linux/slab.h>
#include <asm/div64.h>

#include "super.h"
#include "osdmap.h"
#include "crush/hash.h"
#include "crush/mapper.h"
#include "decode.h"
char *ceph_osdmap_state_str(char *str, int len, int state)
{
	int flag = 0;

	if (!len)
		goto done;

	*str = '\0';
	if (state) {
		if (state & CEPH_OSD_EXISTS) {
			snprintf(str, len, "exists");
			flag = 1;
		}
		if (state & CEPH_OSD_UP) {
			snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
				 "up");
			flag = 1;
		}
	} else {
		snprintf(str, len, "doesn't exist");
	}
done:
	return str;
}
/* number of significant bits in t */
static int calc_bits_of(unsigned t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}
/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}
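/*
 * Worked example (illustrative values): a pool with pg_num = 12 gives
 * calc_bits_of(11) = 4, so pg_num_mask = (1 << 4) - 1 = 15, the
 * smallest 2^n-1 >= 12.  ceph_stable_mod() later uses these masks to
 * fold a placement seed onto a pg count that need not be a power of
 * two.
 */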
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
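/*
 * Each bucket decoder above and below follows the same pattern:
 * ceph_decode_need() checks that enough input remains *before* the
 * unchecked ceph_decode_32() calls run, jumping to the local "bad"
 * label (-EINVAL) on a short buffer, while failed allocations return
 * -ENOMEM directly.
 */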
static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
	if (c->device_parents == NULL)
		goto badmem;
	c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
	if (c->bucket_parents == NULL)
		goto badmem;

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */

	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}
/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	u64 a = *(u64 *)&l;
	u64 b = *(u64 *)&r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}
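/*
 * This is the standard kernel rbtree insertion idiom: descend from
 * root->rb_node remembering the parent and which child link to take,
 * then rb_link_node() splices the new node onto that link and
 * rb_insert_color() rebalances.  A pgid that compares greater than
 * every existing key, for example, walks right at each step and lands
 * as the rightmost leaf.
 */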
static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return pg;
	}
	return NULL;
}
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}
void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
{
	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
	calc_pg_masks(pi);
	*p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
}
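/*
 * Skip arithmetic, worked through (illustrative values): a pool with
 * num_snaps = 2 and num_removed_snap_intervals = 1 advances *p by
 * 2*8 + 1*8*2 = 32 bytes, stepping over snapshot metadata this client
 * does not use from the full-map encoding.
 */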
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len, pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %d len %d\n", pool, len);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			pi->name = kmalloc(len + 1, GFP_NOFS);
			if (pi->name) {
				memcpy(pi->name, *p, len);
				pi->name[len] = '\0';
				dout(" name is %s\n", pi->name);
			}
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}
/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old? */
	if (map->osd_state) {
		memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
		memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
		memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}
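/*
 * Note the allocate-copy-free order above: all three new arrays are
 * allocated before anything is freed, so a failed allocation leaves
 * the existing map untouched.  Growing max_osd from 4 to 8, say,
 * preserves entries 0-3 and leaves 4-7 zero-filled by kcalloc().
 */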
/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		pi = kzalloc(sizeof(*pi), GFP_NOFS);
		if (!pi)
			goto bad;
		pi->id = ceph_decode_32(p);
		ev = ceph_decode_8(p); /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			kfree(pi);	/* not yet inserted; don't leak it */
			goto bad;
		}
		__decode_pool(p, pi);
		__insert_pg_pool(&map->pg_pools, pi);
	}

	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, bad);

	ceph_decode_32_safe(p, end, map->flags, bad);

	max = ceph_decode_32(p);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		struct ceph_pg pgid;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		n = ceph_decode_32(p);
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		err = -ENOMEM;
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg)
			goto bad;
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			pg->osds[j] = ceph_decode_32(p);

		err = __insert_pg_mapping(pg, &map->pg_temp);
		if (err)
			goto bad;
		dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail\n");
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_pool_max, new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;
	struct rb_node *rbp;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_INC_VERSION) {
		pr_warning("got unknown v %d > %d of inc osdmap\n", version,
			   CEPH_OSDMAP_INC_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_32(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_CAST(newcrush);
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	err = -EINVAL;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p); /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		__decode_pool(p, pi);
	}
	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_down */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		ceph_decode_32_safe(p, end, osd, bad);
		(*p)++; /* clean flag */
		pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] &= ~CEPH_OSD_UP;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
			off == CEPH_OSD_IN ? "(in)" :
			(off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	rbp = rb_first(&map->pg_temp);
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		pglen = ceph_decode_32(p);

		/* remove any? */
		while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
						node)->pgid, pgid) <= 0) {
			struct ceph_pg_mapping *cur =
				rb_entry(rbp, struct ceph_pg_mapping, node);

			rbp = rb_next(rbp);
			dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
			rb_erase(&cur->node, &map->pg_temp);
			kfree(cur);
		}

		if (pglen) {
			/* insert */
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg) {
				err = -ENOMEM;
				goto bad;
			}
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err)
				goto bad;
			dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
			     pglen);
		}
	}
	while (rbp) {
		struct ceph_pg_mapping *cur =
			rb_entry(rbp, struct ceph_pg_mapping, node);

		rbp = rb_next(rbp);
		dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
		rb_erase(&cur->node, &map->pg_temp);
		kfree(cur);
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * based on file layout.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				   u64 off, u64 *plen,
				   u64 *ono,
				   u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu  osize %u fl_su %u\n", off, *plen,
	     osize, su);
	su_per_object = osize / su;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	BUG_ON((su & ~PAGE_MASK) != 0);
	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (plen) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, *plen, su - su_offset);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}
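/*
 * Worked example (illustrative layout): su = 64K, sc = 2 and
 * osize = 256K give su_per_object = 4.  For off = 192K, len = 100000:
 * bl = 3, stripeno = 1, stripepos = 1, objsetno = 0, so *ono = 1;
 * su_offset = 0, so *oxoff = (1 % 4) * 64K = 64K and
 * *oxlen = min(100000, 64K) = 64K -- the extent is clipped to the
 * remainder of the stripe unit.
 */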
/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
int ceph_calc_object_layout(struct ceph_object_layout *ol,
			    const char *oid,
			    struct ceph_file_layout *fl,
			    struct ceph_osdmap *osdmap)
{
	unsigned num, num_mask;
	struct ceph_pg pgid;
	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
	int poolid = le32_to_cpu(fl->fl_pg_pool);
	struct ceph_pg_pool_info *pool;
	unsigned ps;

	BUG_ON(!osdmap);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return -EIO;
	ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
	if (preferred >= 0) {
		ps += preferred;
		num = le32_to_cpu(pool->v.lpg_num);
		num_mask = pool->lpg_num_mask;
	} else {
		num = le32_to_cpu(pool->v.pg_num);
		num_mask = pool->pg_num_mask;
	}

	pgid.ps = cpu_to_le16(ps);
	pgid.preferred = cpu_to_le16(preferred);
	pgid.pool = fl->fl_pg_pool;
	if (preferred >= 0)
		dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
		     (int)preferred);
	else
		dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

	ol->ol_pgid = pgid;
	ol->ol_stripe_unit = fl->fl_object_stripe_unit;
	return 0;
}
/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *num)
{
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	unsigned poolid, ps, pps;
	int preferred;

	/* pg_temp? */
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		*num = pg->len;
		return pg->osds;
	}

	/* crush */
	poolid = le32_to_cpu(pgid.pool);
	ps = le16_to_cpu(pgid.ps);
	preferred = (s16)le16_to_cpu(pgid.preferred);

	/* don't forcefeed bad device ids to crush */
	if (preferred >= osdmap->max_osd ||
	    preferred >= osdmap->crush->max_devices)
		preferred = -1;

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return NULL;
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d type %d size %d\n",
		       poolid, pool->v.type, pool->v.size);
		return NULL;
	}

	if (preferred >= 0)
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.lpgp_num),
				      pool->lpgp_num_mask);
	else
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.pgp_num),
				      pool->pgp_num_mask);
	*num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			     min_t(int, pool->v.size, *num),
			     preferred, osdmap->osd_weight);
	return osds;
}
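/*
 * ceph_stable_mod(x, b, bmask), from the ceph headers, returns
 * x & bmask when that is < b, else x & (bmask >> 1).  Worked example
 * (illustrative): with pgp_num b = 12 and mask 15, ps = 9 maps to 9,
 * while ps = 13 (13 & 15 = 13 >= 12) maps to 13 & 7 = 5, so placements
 * stay stable as pgp_num grows toward the next power of two.
 */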
/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *acting)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, o, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	o = 0;
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			acting[o++] = osds[i];
	return o;
}
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			return osds[i];
	return -1;
}