/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2015 Intel Corporation All Rights Reserved
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifdef __KERNEL__
# include <linux/string.h>
# include <linux/slab.h>
# include <linux/bug.h>
# include <linux/kernel.h>
# include <linux/crush/crush.h>
# include <linux/crush/hash.h>
# include <linux/crush/mapper.h>
#else
# include "crush_compat.h"
# include "crush.h"
# include "hash.h"
# include "mapper.h"
#endif
#include "crush_ln_table.h"

#define dprintk(args...) /* printf(args) */

/*
 * Implement the core CRUSH mapping algorithm.
 */

/**
 * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
 * @map: the crush_map
 * @ruleset: the storage ruleset id (user defined)
 * @type: storage ruleset type (user defined)
 * @size: output set size
 */
int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
{
	__u32 i;

	for (i = 0; i < map->max_rules; i++) {
		if (map->rules[i] &&
		    map->rules[i]->mask.ruleset == ruleset &&
		    map->rules[i]->mask.type == type &&
		    map->rules[i]->mask.min_size <= size &&
		    map->rules[i]->mask.max_size >= size)
			return i;
	}
	return -1;
}
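
/*
 * Example usage (illustrative only, not part of the upstream file): a caller
 * that knows a pool's ruleset, type, and replica count looks the rule up once
 * and rejects the request if nothing matches, e.g.
 *
 *	int ruleno = crush_find_rule(map, ruleset, type, size);
 *	if (ruleno < 0)
 *		return -EINVAL;		(error handling is the caller's choice)
 *
 * before handing ruleno to crush_do_rule() below.
 */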

/*
 * bucket choose methods
 *
 * For each bucket algorithm, we have a "choose" method that, given a
 * crush input @x and replica position (usually, position in output set) @r,
 * will produce an item in the bucket.
 */

/*
 * Choose based on a random permutation of the bucket.
 *
 * We used to use some prime number arithmetic to do this, but it
 * wasn't very random, and had some other bad behaviors.  Instead, we
 * calculate an actual random permutation of the bucket members.
 * Since this is expensive, we optimize for the r=0 case, which
 * captures the vast majority of calls.
 */
static int bucket_perm_choose(const struct crush_bucket *bucket,
			      struct crush_work_bucket *work,
			      int x, int r)
{
	unsigned int pr = r % bucket->size;
	unsigned int i, s;

	/* start a new permutation if @x has changed */
	if (work->perm_x != (__u32)x || work->perm_n == 0) {
		dprintk("bucket %d new x=%d\n", bucket->id, x);
		work->perm_x = x;

		/* optimize common r=0 case */
		if (pr == 0) {
			s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
				bucket->size;
			work->perm[0] = s;
			work->perm_n = 0xffff;	/* magic value, see below */
			goto out;
		}

		for (i = 0; i < bucket->size; i++)
			work->perm[i] = i;
		work->perm_n = 0;
	} else if (work->perm_n == 0xffff) {
		/* clean up after the r=0 case above */
		for (i = 1; i < bucket->size; i++)
			work->perm[i] = i;
		work->perm[work->perm[0]] = 0;
		work->perm_n = 1;
	}

	/* calculate permutation up to pr */
	for (i = 0; i < work->perm_n; i++)
		dprintk(" perm_choose have %d: %d\n", i, work->perm[i]);
	while (work->perm_n <= pr) {
		unsigned int p = work->perm_n;

		/* no point in swapping the final entry */
		if (p < bucket->size - 1) {
			i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
				(bucket->size - p);
			if (i) {
				unsigned int t = work->perm[p + i];

				work->perm[p + i] = work->perm[p];
				work->perm[p] = t;
			}
			dprintk(" perm_choose swap %d with %d\n", p, p+i);
		}
		work->perm_n++;
	}
	for (i = 0; i < bucket->size; i++)
		dprintk(" perm_choose  %d: %d\n", i, work->perm[i]);

	s = work->perm[pr];
out:
	dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
		bucket->size, x, r, pr, s);
	return bucket->items[s];
}
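
/*
 * Note: work->perm[] is filled in lazily with one hash-driven swap per
 * position (a Fisher-Yates style shuffle), so a call with replica position r
 * only pays for the first r+1 entries of the permutation.  The 0xffff
 * sentinel marks the r=0 shortcut, where only perm[0] was computed and the
 * rest of the identity permutation is rebuilt on the next call that needs it.
 */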

/* uniform */
static int bucket_uniform_choose(const struct crush_bucket_uniform *bucket,
				 struct crush_work_bucket *work, int x, int r)
{
	return bucket_perm_choose(&bucket->h, work, x, r);
}

/* list */
static int bucket_list_choose(const struct crush_bucket_list *bucket,
			      int x, int r)
{
	int i;

	for (i = bucket->h.size-1; i >= 0; i--) {
		__u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],
					 r, bucket->h.id);
		w &= 0xffff;
		dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
			"sw %x rand %llx",
			i, x, r, bucket->h.items[i], bucket->item_weights[i],
			bucket->sum_weights[i], w);
		w *= bucket->sum_weights[i];
		w = w >> 16;
		/*dprintk(" scaled %llx\n", w);*/
		if (w < bucket->item_weights[i]) {
			return bucket->h.items[i];
		}
	}

	dprintk("bad list sums for bucket %d\n", bucket->h.id);
	return bucket->h.items[0];
}
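
/*
 * Note: the comparison above is 16.16 fixed-point.  The low 16 bits of the
 * hash act as a random fraction in [0,1); multiplying by sum_weights[i] and
 * shifting right by 16 gives a random point in [0, sum_weights[i]), so item i
 * wins with probability item_weights[i] / sum_weights[i] as the loop walks
 * from the tail of the list toward the head.
 */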

/* (binary) tree */
static int height(int n)
{
	int h = 0;

	while ((n & 1) == 0) {
		h++;
		n = n >> 1;
	}
	return h;
}

static int left(int x)
{
	int h = height(x);

	return x - (1 << (h-1));
}

static int right(int x)
{
	int h = height(x);

	return x + (1 << (h-1));
}

static int terminal(int x)
{
	return x & 1;
}

static int bucket_tree_choose(const struct crush_bucket_tree *bucket,
			      int x, int r)
{
	int n;
	__u32 w;
	__u64 t;

	/* start at root */
	n = bucket->num_nodes >> 1;

	while (!terminal(n)) {
		int l;

		/* pick point in [0, w) */
		w = bucket->node_weights[n];
		t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
					  bucket->h.id) * (__u64)w;
		t = t >> 32;

		/* descend to the left or right? */
		l = left(n);
		if (t < bucket->node_weights[l])
			n = l;
		else
			n = right(n);
	}

	return bucket->h.items[n >> 1];
}
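
/*
 * Note: tree bucket nodes live in an implicit binary tree where odd indices
 * are leaves.  height() counts trailing zero bits, so for n=4 (binary 100)
 * the height is 2, left(4) = 2 and right(4) = 6, and terminal(n) just tests
 * whether n is odd.  The descent compares a hash-derived point in
 * [0, node weight) against the left child's weight to pick a subtree.
 */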

/* straw */

static int bucket_straw_choose(const struct crush_bucket_straw *bucket,
			       int x, int r)
{
	__u32 i;
	int high = 0;
	__u64 high_draw = 0;
	__u64 draw;

	for (i = 0; i < bucket->h.size; i++) {
		draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
		draw &= 0xffff;
		draw *= bucket->straws[i];
		if (i == 0 || draw > high_draw) {
			high = i;
			high_draw = draw;
		}
	}

	return bucket->h.items[high];
}
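
/*
 * Note: each item's "straw" is a 16-bit hash scaled by the precomputed
 * bucket->straws[i] factor, and the longest straw wins.  straw2 below
 * replaces the precomputed scaling with an explicit ln(hash)/weight draw
 * (reportedly so that changing one item's weight only moves data to or from
 * that item).
 */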

/* compute 2^44*log2(input+1) */
static __u64 crush_ln(unsigned int xin)
{
	unsigned int x = xin;
	int iexpon, index1, index2;
	__u64 RH, LH, LL, xl64, result;

	x++;

	/* normalize input */
	iexpon = 15;

	/*
	 * figure out number of bits we need to shift and
	 * do it in one step instead of iteratively
	 */
	if (!(x & 0x18000)) {
		int bits = __builtin_clz(x & 0x1FFFF) - 16;

		x <<= bits;
		iexpon = 15 - bits;
	}

	index1 = (x >> 8) << 1;
	/* RH ~ 2^56/index1 */
	RH = __RH_LH_tbl[index1 - 256];
	/* LH ~ 2^48 * log2(index1/256) */
	LH = __RH_LH_tbl[index1 + 1 - 256];

	/* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
	xl64 = (__s64)x * RH;
	xl64 >>= 48;

	result = iexpon;
	result <<= (12 + 32);

	index2 = xl64 & 0xff;
	/* LL ~ 2^48*log2(1.0+index2/2^15) */
	LL = __LL_tbl[index2];

	LH = LH + LL;

	LH >>= (48 - 12 - 32);
	result += LH;

	return result;
}
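
/*
 * Worked example of the fixed-point scale: crush_ln() returns roughly
 * 2^44 * log2(input+1), so crush_ln(0xffff) is about
 * 2^44 * log2(0x10000) = 2^44 * 16 = 0x1000000000000.  That is the constant
 * subtracted in bucket_straw2_choose() below, which maps a maximal 16-bit
 * hash to a draw near 0 and smaller hashes to increasingly negative draws.
 */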

/*
 * straw2
 *
 * for reference, see:
 *
 * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
 *
 */

static int bucket_straw2_choose(const struct crush_bucket_straw2 *bucket,
				int x, int r)
{
	unsigned int i, high = 0;
	unsigned int u;
	__u32 w;
	__s64 ln, draw, high_draw = 0;

	for (i = 0; i < bucket->h.size; i++) {
		w = bucket->item_weights[i];
		if (w) {
			u = crush_hash32_3(bucket->h.hash, x,
					   bucket->h.items[i], r);
			u &= 0xffff;

			/*
			 * for some reason slightly less than 0x10000 produces
			 * a slightly more accurate distribution... probably a
			 * rounding effect.
			 *
			 * the natural log lookup table maps [0,0xffff]
			 * (corresponding to real numbers [1/0x10000, 1] to
			 * [0, 0xffffffffffff] (corresponding to real numbers
			 * [-11.090355,0]).
			 */
			ln = crush_ln(u) - 0x1000000000000ll;

			/*
			 * divide by 16.16 fixed-point weight.  note
			 * that the ln value is negative, so a larger
			 * weight means a larger (less negative) value
			 * for draw.
			 */
			draw = div64_s64(ln, w);
		} else {
			draw = S64_MIN;
		}

		if (i == 0 || draw > high_draw) {
			high = i;
			high_draw = draw;
		}
	}

	return bucket->h.items[high];
}
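
/*
 * Note on the statistics (see the exponential-distribution reference above):
 * if each item draws an exponential variable with rate equal to its weight,
 * the probability that item i attains the minimum is w_i / sum(w).  Picking
 * the largest ln(u)/w (ln(u) <= 0) is the same as picking the smallest
 * -ln(u)/w, i.e. that minimum, which is what makes the selection
 * weight-proportional.
 */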

static int crush_bucket_choose(const struct crush_bucket *in,
			       struct crush_work_bucket *work,
			       int x, int r)
{
	dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
	BUG_ON(in->size == 0);
	switch (in->alg) {
	case CRUSH_BUCKET_UNIFORM:
		return bucket_uniform_choose(
			(const struct crush_bucket_uniform *)in,
			work, x, r);
	case CRUSH_BUCKET_LIST:
		return bucket_list_choose((const struct crush_bucket_list *)in,
					  x, r);
	case CRUSH_BUCKET_TREE:
		return bucket_tree_choose((const struct crush_bucket_tree *)in,
					  x, r);
	case CRUSH_BUCKET_STRAW:
		return bucket_straw_choose(
			(const struct crush_bucket_straw *)in,
			x, r);
	case CRUSH_BUCKET_STRAW2:
		return bucket_straw2_choose(
			(const struct crush_bucket_straw2 *)in,
			x, r);
	default:
		dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
		return in->items[0];
	}
}

/*
 * true if device is marked "out" (failed, fully offloaded)
 * of the cluster
 */
static int is_out(const struct crush_map *map,
		  const __u32 *weight, int weight_max,
		  int item, int x)
{
	if (item >= weight_max)
		return 1;
	if (weight[item] >= 0x10000)
		return 0;
	if (weight[item] == 0)
		return 1;
	if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)
	    < weight[item])
		return 0;
	return 1;
}
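
/*
 * Note: device weights are 16.16 fixed-point.  0x10000 means fully "in",
 * 0 means fully "out", and intermediate values reweight probabilistically:
 * the hash of (x, item) is compared against the weight, so a device with
 * weight 0x8000 rejects roughly half of the inputs mapped to it.
 */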

/**
 * crush_choose_firstn - choose numrep distinct items of given type
 * @map: the crush_map
 * @bucket: the bucket we are choosing an item from
 * @x: crush input value
 * @numrep: the number of items to choose
 * @type: the type of item to choose
 * @out: pointer to output vector
 * @outpos: our position in that vector
 * @out_size: size of the out vector
 * @tries: number of attempts to make
 * @recurse_tries: number of attempts to have recursive chooseleaf make
 * @local_retries: localized retries
 * @local_fallback_retries: localized fallback retries
 * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
 * @stable: stable mode starts rep=0 in the recursive call for all replicas
 * @vary_r: pass r to recursive calls
 * @out2: second output vector for leaf items (if @recurse_to_leaf)
 * @parent_r: r value passed from the parent
 */
static int crush_choose_firstn(const struct crush_map *map,
			       struct crush_work *work,
			       const struct crush_bucket *bucket,
			       const __u32 *weight, int weight_max,
			       int x, int numrep, int type,
			       int *out, int outpos,
			       int out_size,
			       unsigned int tries,
			       unsigned int recurse_tries,
			       unsigned int local_retries,
			       unsigned int local_fallback_retries,
			       int recurse_to_leaf,
			       unsigned int vary_r,
			       unsigned int stable,
			       int *out2,
			       int parent_r)
{
	int rep;
	unsigned int ftotal, flocal;
	int retry_descent, retry_bucket, skip_rep;
	const struct crush_bucket *in = bucket;
	int r;
	int i;
	int item = 0;
	int itemtype;
	int collide, reject;
	int count = out_size;

	dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n",
		recurse_to_leaf ? "_LEAF" : "",
		bucket->id, x, outpos, numrep,
		tries, recurse_tries, local_retries, local_fallback_retries,
		parent_r, stable);

	for (rep = stable ? 0 : outpos; rep < numrep && count > 0; rep++) {
		/* keep trying until we get a non-out, non-colliding item */
		ftotal = 0;
		skip_rep = 0;
		do {
			retry_descent = 0;
			in = bucket;              /* initial bucket */

			/* choose through intervening buckets */
			flocal = 0;
			do {
				collide = 0;
				retry_bucket = 0;
				r = rep + parent_r;
				/* r' = r + f_total */
				r += ftotal;

				/* bucket choose */
				if (in->size == 0) {
					reject = 1;
					goto reject;
				}
				if (local_fallback_retries > 0 &&
				    flocal >= (in->size>>1) &&
				    flocal > local_fallback_retries)
					item = bucket_perm_choose(
						in, work->work[-1-in->id],
						x, r);
				else
					item = crush_bucket_choose(
						in, work->work[-1-in->id],
						x, r);
				if (item >= map->max_devices) {
					dprintk("   bad item %d\n", item);
					skip_rep = 1;
					break;
				}

				/* desired type? */
				if (item < 0)
					itemtype =
						map->buckets[-1-item]->type;
				else
					itemtype = 0;
				dprintk("  item %d type %d\n", item, itemtype);

				/* keep going? */
				if (itemtype != type) {
					if (item >= 0 ||
					    (-1-item) >= map->max_buckets) {
						dprintk("   bad item type %d\n", type);
						skip_rep = 1;
						break;
					}
					in = map->buckets[-1-item];
					retry_bucket = 1;
					continue;
				}

				/* collision? */
				for (i = 0; i < outpos; i++) {
					if (out[i] == item) {
						collide = 1;
						break;
					}
				}

				reject = 0;
				if (!collide && recurse_to_leaf) {
					if (item < 0) {
						int sub_r;

						if (vary_r)
							sub_r = r >> (vary_r-1);
						else
							sub_r = 0;
						if (crush_choose_firstn(
							    map,
							    work,
							    map->buckets[-1-item],
							    weight, weight_max,
							    x, stable ? 1 : outpos+1, 0,
							    out2, outpos, count,
							    recurse_tries, 0,
							    local_retries,
							    local_fallback_retries,
							    0,
							    vary_r,
							    stable,
							    NULL,
							    sub_r) <= outpos)
							/* didn't get leaf */
							reject = 1;
					} else {
						/* we already have a leaf! */
						out2[outpos] = item;
					}
				}

				if (!reject && !collide) {
					/* out? */
					if (itemtype == 0)
						reject = is_out(map, weight,
								weight_max,
								item, x);
				}

reject:
				if (reject || collide) {
					ftotal++;
					flocal++;

					if (collide && flocal <= local_retries)
						/* retry locally a few times */
						retry_bucket = 1;
					else if (local_fallback_retries > 0 &&
						 flocal <= in->size + local_fallback_retries)
						/* exhaustive bucket search */
						retry_bucket = 1;
					else if (ftotal < tries)
						/* then retry descent */
						retry_descent = 1;
					else
						/* else give up */
						skip_rep = 1;
					dprintk("  reject %d  collide %d  "
						"ftotal %u  flocal %u\n",
						reject, collide, ftotal,
						flocal);
				}
			} while (retry_bucket);
		} while (retry_descent);

		if (skip_rep) {
			dprintk("skip rep\n");
			continue;
		}

		dprintk("CHOOSE got %d\n", item);
		out[outpos] = item;
		outpos++;
		count--;
#ifndef __KERNEL__
		if (map->choose_tries && ftotal <= map->choose_total_tries)
			map->choose_tries[ftotal]++;
#endif
	}

	dprintk("CHOOSE returns %d\n", outpos);
	return outpos;
}
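
/*
 * Note on the retry ladder above: a collision or rejection first triggers a
 * few retries within the same bucket (local_retries), then an exhaustive
 * permutation-based search of that bucket (local_fallback_retries, via
 * bucket_perm_choose), and only then a restart of the whole descent from the
 * take bucket, bounded by @tries total failures before the replica slot is
 * skipped.
 */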

/**
 * crush_choose_indep: alternative breadth-first positionally stable mapping
 *
 */
static void crush_choose_indep(const struct crush_map *map,
			       struct crush_work *work,
			       const struct crush_bucket *bucket,
			       const __u32 *weight, int weight_max,
			       int x, int left, int numrep, int type,
			       int *out, int outpos,
			       unsigned int tries,
			       unsigned int recurse_tries,
			       int recurse_to_leaf,
			       int *out2,
			       int parent_r)
{
	const struct crush_bucket *in = bucket;
	int endpos = outpos + left;
	int rep;
	unsigned int ftotal;
	int r;
	int i;
	int item = 0;
	int itemtype;
	int collide;

	dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
		bucket->id, x, outpos, numrep);

	/* initially my result is undefined */
	for (rep = outpos; rep < endpos; rep++) {
		out[rep] = CRUSH_ITEM_UNDEF;
		if (out2)
			out2[rep] = CRUSH_ITEM_UNDEF;
	}

	for (ftotal = 0; left > 0 && ftotal < tries; ftotal++) {
#ifdef DEBUG_INDEP
		if (out2 && ftotal) {
			dprintk("%u %d a: ", ftotal, left);
			for (rep = outpos; rep < endpos; rep++) {
				dprintk(" %d", out[rep]);
			}
			dprintk("\n");
			dprintk("%u %d b: ", ftotal, left);
			for (rep = outpos; rep < endpos; rep++) {
				dprintk(" %d", out2[rep]);
			}
			dprintk("\n");
		}
#endif
		for (rep = outpos; rep < endpos; rep++) {
			if (out[rep] != CRUSH_ITEM_UNDEF)
				continue;

			in = bucket;  /* initial bucket */

			/* choose through intervening buckets */
			for (;;) {
				/* note: we base the choice on the position
				 * even in the nested call.  that means that
				 * if the first layer chooses the same bucket
				 * in a different position, we will tend to
				 * choose a different item in that bucket.
				 * this will involve more devices in data
				 * movement and tend to distribute the load.
				 */
				r = rep + parent_r;

				/* be careful */
				if (in->alg == CRUSH_BUCKET_UNIFORM &&
				    in->size % numrep == 0)
					/* r'=r+(n+1)*f_total */
					r += (numrep+1) * ftotal;
				else
					/* r' = r + n*f_total */
					r += numrep * ftotal;

				/* bucket choose */
				if (in->size == 0) {
					dprintk("   empty bucket\n");
					break;
				}

				item = crush_bucket_choose(
					in, work->work[-1-in->id],
					x, r);
				if (item >= map->max_devices) {
					dprintk("   bad item %d\n", item);
					out[rep] = CRUSH_ITEM_NONE;
					if (out2)
						out2[rep] = CRUSH_ITEM_NONE;
					left--;
					break;
				}

				/* desired type? */
				if (item < 0)
					itemtype =
						map->buckets[-1-item]->type;
				else
					itemtype = 0;
				dprintk("  item %d type %d\n", item, itemtype);

				/* keep going? */
				if (itemtype != type) {
					if (item >= 0 ||
					    (-1-item) >= map->max_buckets) {
						dprintk("   bad item type %d\n", type);
						out[rep] = CRUSH_ITEM_NONE;
						if (out2)
							out2[rep] =
								CRUSH_ITEM_NONE;
						left--;
						break;
					}
					in = map->buckets[-1-item];
					continue;
				}

				/* collision? */
				collide = 0;
				for (i = outpos; i < endpos; i++) {
					if (out[i] == item) {
						collide = 1;
						break;
					}
				}
				if (collide)
					break;

				if (recurse_to_leaf) {
					if (item < 0) {
						crush_choose_indep(
							map, work,
							map->buckets[-1-item],
							weight, weight_max,
							x, 1, numrep, 0,
							out2, rep,
							recurse_tries, 0,
							0, NULL, r);
						if (out2[rep] == CRUSH_ITEM_NONE) {
							/* placed nothing; no leaf */
							break;
						}
					} else {
						/* we already have a leaf! */
						out2[rep] = item;
					}
				}

				/* out? */
				if (itemtype == 0 &&
				    is_out(map, weight, weight_max, item, x))
					break;

				/* yay! */
				out[rep] = item;
				left--;
				break;
			}
		}
	}
	for (rep = outpos; rep < endpos; rep++) {
		if (out[rep] == CRUSH_ITEM_UNDEF) {
			out[rep] = CRUSH_ITEM_NONE;
		}
		if (out2 && out2[rep] == CRUSH_ITEM_UNDEF) {
			out2[rep] = CRUSH_ITEM_NONE;
		}
	}
#ifndef __KERNEL__
	if (map->choose_tries && ftotal <= map->choose_total_tries)
		map->choose_tries[ftotal]++;
#endif
#ifdef DEBUG_INDEP
	if (out2) {
		dprintk("%u %d a: ", ftotal, left);
		for (rep = outpos; rep < endpos; rep++) {
			dprintk(" %d", out[rep]);
		}
		dprintk("\n");
		dprintk("%u %d b: ", ftotal, left);
		for (rep = outpos; rep < endpos; rep++) {
			dprintk(" %d", out2[rep]);
		}
		dprintk("\n");
	}
#endif
}

/*
 * This takes a chunk of memory and sets it up to be a shiny new
 * working area for a CRUSH placement computation. It must be called
 * on any newly allocated memory before passing it in to
 * crush_do_rule. It may be used repeatedly after that, so long as the
 * map has not changed. If the map /has/ changed, you must make sure
 * the working size is no smaller than what was allocated and re-run
 * crush_init_workspace.
 *
 * If you do retain the working space between calls to crush, make it
 * thread-local.
 */
void crush_init_workspace(const struct crush_map *map, void *v)
{
	struct crush_work *w = v;
	__s32 b;

	/*
	 * We work by moving through the available space and setting
	 * values and pointers as we go.
	 *
	 * It's a bit like Forth's use of the 'allot' word since we
	 * set the pointer first and then reserve the space for it to
	 * point to by incrementing the pointer.
	 */
	v += sizeof(struct crush_work *);
	w->work = v;
	v += map->max_buckets * sizeof(struct crush_work_bucket *);
	for (b = 0; b < map->max_buckets; ++b) {
		if (!map->buckets[b])
			continue;

		w->work[b] = v;
		switch (map->buckets[b]->alg) {
		default:
			v += sizeof(struct crush_work_bucket);
			break;
		}
		w->work[b]->perm_x = 0;
		w->work[b]->perm_n = 0;
		w->work[b]->perm = v;
		v += map->buckets[b]->size * sizeof(__u32);
	}
	BUG_ON(v - (void *)w != map->working_size);
}
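
/*
 * Example usage (illustrative sketch; the allocation call and the
 * crush_work_size() helper from the mapper header are assumptions here):
 *
 *	void *work = kmalloc(crush_work_size(map, result_max), GFP_NOIO);
 *	crush_init_workspace(map, work);
 *	n = crush_do_rule(map, ruleno, x, result, result_max,
 *			  weight, weight_max, work);
 *
 * The workspace can be reused across calls as long as the map is unchanged.
 */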

/**
 * crush_do_rule - calculate a mapping with the given input and rule
 * @map: the crush_map
 * @ruleno: the rule id
 * @x: hash input
 * @result: pointer to result vector
 * @result_max: maximum result size
 * @weight: weight vector (for map leaves)
 * @weight_max: size of weight vector
 * @cwin: pointer to at least crush_work_size() bytes of memory
 */
int crush_do_rule(const struct crush_map *map,
		  int ruleno, int x, int *result, int result_max,
		  const __u32 *weight, int weight_max,
		  void *cwin)
{
	int result_len;
	struct crush_work *cw = cwin;
	int *a = cwin + map->working_size;
	int *b = a + result_max;
	int *c = b + result_max;
	int *w = a;
	int *o = b;
	int *tmp;
	int recurse_to_leaf;
	int wsize = 0;
	int osize;
	const struct crush_rule *rule;
	__u32 step;
	int i, j;
	int numrep;
	int out_size;
	/*
	 * the original choose_total_tries value was off by one (it
	 * counted "retries" and not "tries").  add one.
	 */
	int choose_tries = map->choose_total_tries + 1;
	int choose_leaf_tries = 0;
	/*
	 * the local tries values were counted as "retries", though,
	 * and need no adjustment
	 */
	int choose_local_retries = map->choose_local_tries;
	int choose_local_fallback_retries = map->choose_local_fallback_tries;

	int vary_r = map->chooseleaf_vary_r;
	int stable = map->chooseleaf_stable;

	if ((__u32)ruleno >= map->max_rules) {
		dprintk(" bad ruleno %d\n", ruleno);
		return 0;
	}

	rule = map->rules[ruleno];
	result_len = 0;

	for (step = 0; step < rule->len; step++) {
		int firstn = 0;
		const struct crush_rule_step *curstep = &rule->steps[step];

		switch (curstep->op) {
		case CRUSH_RULE_TAKE:
			if ((curstep->arg1 >= 0 &&
			     curstep->arg1 < map->max_devices) ||
			    (-1-curstep->arg1 >= 0 &&
			     -1-curstep->arg1 < map->max_buckets &&
			     map->buckets[-1-curstep->arg1])) {
				w[0] = curstep->arg1;
				wsize = 1;
			} else {
				dprintk(" bad take value %d\n", curstep->arg1);
			}
			break;

		case CRUSH_RULE_SET_CHOOSE_TRIES:
			if (curstep->arg1 > 0)
				choose_tries = curstep->arg1;
			break;

		case CRUSH_RULE_SET_CHOOSELEAF_TRIES:
			if (curstep->arg1 > 0)
				choose_leaf_tries = curstep->arg1;
			break;

		case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES:
			if (curstep->arg1 >= 0)
				choose_local_retries = curstep->arg1;
			break;

		case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES:
			if (curstep->arg1 >= 0)
				choose_local_fallback_retries = curstep->arg1;
			break;

		case CRUSH_RULE_SET_CHOOSELEAF_VARY_R:
			if (curstep->arg1 >= 0)
				vary_r = curstep->arg1;
			break;

		case CRUSH_RULE_SET_CHOOSELEAF_STABLE:
			if (curstep->arg1 >= 0)
				stable = curstep->arg1;
			break;

		case CRUSH_RULE_CHOOSELEAF_FIRSTN:
		case CRUSH_RULE_CHOOSE_FIRSTN:
			firstn = 1;
			/* fall through */
		case CRUSH_RULE_CHOOSELEAF_INDEP:
		case CRUSH_RULE_CHOOSE_INDEP:
			if (wsize == 0)
				break;

			recurse_to_leaf =
				curstep->op ==
				 CRUSH_RULE_CHOOSELEAF_FIRSTN ||
				curstep->op ==
				CRUSH_RULE_CHOOSELEAF_INDEP;

			/* reset output */
			osize = 0;

			for (i = 0; i < wsize; i++) {
				int bno;

				/*
				 * see CRUSH_N, CRUSH_N_MINUS macros.
				 * basically, numrep <= 0 means relative to
				 * the provided result_max
				 */
				numrep = curstep->arg1;
				if (numrep <= 0) {
					numrep += result_max;
					if (numrep <= 0)
						continue;
				}
				j = 0;
				/* make sure bucket id is valid */
				bno = -1 - w[i];
				if (bno < 0 || bno >= map->max_buckets) {
					/* w[i] is probably CRUSH_ITEM_NONE */
					dprintk("  bad w[i] %d\n", w[i]);
					continue;
				}
				if (firstn) {
					int recurse_tries;

					if (choose_leaf_tries)
						recurse_tries =
							choose_leaf_tries;
					else if (map->chooseleaf_descend_once)
						recurse_tries = 1;
					else
						recurse_tries = choose_tries;
					osize += crush_choose_firstn(
						map,
						cw,
						map->buckets[bno],
						weight, weight_max,
						x, numrep,
						curstep->arg2,
						o+osize, j,
						result_max-osize,
						choose_tries,
						recurse_tries,
						choose_local_retries,
						choose_local_fallback_retries,
						recurse_to_leaf,
						vary_r,
						stable,
						c+osize,
						0);
				} else {
					out_size = ((numrep < (result_max-osize)) ?
						    numrep : (result_max-osize));
					crush_choose_indep(
						map,
						cw,
						map->buckets[bno],
						weight, weight_max,
						x, out_size, numrep,
						curstep->arg2,
						o+osize, j,
						choose_tries,
						choose_leaf_tries ?
						   choose_leaf_tries : 1,
						recurse_to_leaf,
						c+osize,
						0);
					osize += out_size;
				}
			}

			if (recurse_to_leaf)
				/* copy final _leaf_ values to output set */
				memcpy(o, c, osize*sizeof(*o));

			/* swap o and w arrays */
			tmp = o;
			o = w;
			w = tmp;
			wsize = osize;
			break;

		case CRUSH_RULE_EMIT:
			for (i = 0; i < wsize && result_len < result_max; i++) {
				result[result_len] = w[i];
				result_len++;
			}
			wsize = 0;
			break;

		default:
			dprintk(" unknown op %d at step %d\n",
				curstep->op, step);
			break;
		}
	}

	return result_len;
}