/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2015 Intel Corporation All Rights Reserved
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 */

# include <linux/string.h>
# include <linux/slab.h>
# include <linux/bug.h>
# include <linux/kernel.h>
# include <linux/crush/crush.h>
# include <linux/crush/hash.h>
# include <linux/crush/mapper.h>

# include "crush_compat.h"

#include "crush_ln_table.h"

#define dprintk(args...) /* printf(args) */
/*
 * Implement the core CRUSH mapping algorithm.
 */

/*
 * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
 * @ruleset: the storage ruleset id (user defined)
 * @type: storage ruleset type (user defined)
 * @size: output set size
 */
int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
	for (i = 0; i < map->max_rules; i++) {
		    map->rules[i]->mask.ruleset == ruleset &&
		    map->rules[i]->mask.type == type &&
		    map->rules[i]->mask.min_size <= size &&
		    map->rules[i]->mask.max_size >= size)
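
/*
 * A minimal usage sketch (caller-side code assumed for illustration, not part
 * of this file): the returned rule id feeds straight into crush_do_rule(),
 * whose signature appears later in this file.
 *
 *	int ruleno = crush_find_rule(map, ruleset, type, numrep);
 *	if (ruleno >= 0)
 *		crush_do_rule(map, ruleno, x, result, result_max,
 *			      weight, weight_max, cwin, NULL);
 *
 * The loop above picks the first rule whose mask matches @ruleset and @type
 * and whose [min_size, max_size] range contains @size.
 */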
/*
 * bucket choose methods
 *
 * For each bucket algorithm, we have a "choose" method that, given a
 * crush input @x and replica position (usually, position in output set) @r,
 * will produce an item in the bucket.
 */

/*
 * Choose based on a random permutation of the bucket.
 *
 * We used to use some prime number arithmetic to do this, but it
 * wasn't very random, and had some other bad behaviors.  Instead, we
 * calculate an actual random permutation of the bucket members.
 * Since this is expensive, we optimize for the r=0 case, which
 * captures the vast majority of calls.
 */
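
/*
 * A small worked example (illustrative numbers, not taken from the source):
 * for a bucket of size 4, r=0 needs only a single hash to pick perm[0],
 * while r=2 forces the Fisher-Yates style swaps below to extend the
 * permutation up to position 2 before the entry at that position can be
 * read.  Since most calls ask for r=0, the shortcut avoids building the
 * permutation at all in the common case.
 */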
static int bucket_perm_choose(const struct crush_bucket *bucket,
			      struct crush_work_bucket *work,
	unsigned int pr = r % bucket->size;

	/* start a new permutation if @x has changed */
	if (work->perm_x != (__u32)x || work->perm_n == 0) {
		dprintk("bucket %d new x=%d\n", bucket->id, x);

		/* optimize common r=0 case */
			s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
			work->perm_n = 0xffff; /* magic value, see below */

		for (i = 0; i < bucket->size; i++)
	} else if (work->perm_n == 0xffff) {
		/* clean up after the r=0 case above */
		for (i = 1; i < bucket->size; i++)
		work->perm[work->perm[0]] = 0;

	/* calculate permutation up to pr */
	for (i = 0; i < work->perm_n; i++)
		dprintk(" perm_choose have %d: %d\n", i, work->perm[i]);
	while (work->perm_n <= pr) {
		unsigned int p = work->perm_n;
		/* no point in swapping the final entry */
		if (p < bucket->size - 1) {
			i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
				unsigned int t = work->perm[p + i];
				work->perm[p + i] = work->perm[p];
			dprintk(" perm_choose swap %d with %d\n", p, p+i);
	for (i = 0; i < bucket->size; i++)
		dprintk(" perm_choose %d: %d\n", i, work->perm[i]);

	dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
		bucket->size, x, r, pr, s);
	return bucket->items[s];
static int bucket_uniform_choose(const struct crush_bucket_uniform *bucket,
				 struct crush_work_bucket *work, int x, int r)
{
	return bucket_perm_choose(&bucket->h, work, x, r);
}
static int bucket_list_choose(const struct crush_bucket_list *bucket,
	for (i = bucket->h.size-1; i >= 0; i--) {
		__u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],
		dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
			i, x, r, bucket->h.items[i], bucket->item_weights[i],
			bucket->sum_weights[i], w);
		w *= bucket->sum_weights[i];
		/*dprintk(" scaled %llx\n", w);*/
		if (w < bucket->item_weights[i]) {
			return bucket->h.items[i];

	dprintk("bad list sums for bucket %d\n", bucket->h.id);
	return bucket->h.items[0];
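
/*
 * Reading of the loop above (a sketch; the intermediate masking and shift
 * steps are elided in this excerpt): for each item, the hash is scaled
 * against sum_weights[i] so that the comparison against item_weights[i]
 * accepts the item with probability roughly item_weights[i] /
 * sum_weights[i].  Walking from the tail of the list toward the head
 * therefore gives each item a share of placements proportional to its
 * weight.
 */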
static int height(int n)
	while ((n & 1) == 0) {

static int left(int x)
	return x - (1 << (h-1));

static int right(int x)
	return x + (1 << (h-1));

static int terminal(int x)
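
/*
 * Node numbering sketch for the tree bucket (illustrative, assuming the
 * usual 1-based in-order layout implied by the helpers above): leaves sit
 * at odd indices and interior nodes at even ones, e.g. for 4 leaves
 *
 *	        4
 *	      /   \
 *	     2     6
 *	    / \   / \
 *	   1   3 5   7
 *
 * height() counts trailing zero bits, so height(4)=2, height(2)=1 and
 * height(1)=0; left()/right() step to a child by subtracting or adding half
 * the node's span, 1 << (height-1); terminal() is true once an odd (leaf)
 * index is reached, which is when the descent below stops.
 */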
static int bucket_tree_choose(const struct crush_bucket_tree *bucket,
	n = bucket->num_nodes >> 1;

	while (!terminal(n)) {
		/* pick point in [0, w) */
		w = bucket->node_weights[n];
		t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
					  bucket->h.id) * (__u64)w;

		/* descend to the left or right? */
		if (t < bucket->node_weights[l])

	return bucket->h.items[n >> 1];
static int bucket_straw_choose(const struct crush_bucket_straw *bucket,
	for (i = 0; i < bucket->h.size; i++) {
		draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
		draw *= bucket->straws[i];
		if (i == 0 || draw > high_draw) {

	return bucket->h.items[high];
/* compute 2^44*log2(input+1) */
static __u64 crush_ln(unsigned int xin)
	unsigned int x = xin;
	int iexpon, index1, index2;
	__u64 RH, LH, LL, xl64, result;

	/* normalize input */

	/*
	 * figure out number of bits we need to shift and
	 * do it in one step instead of iteratively
	 */
	if (!(x & 0x18000)) {
		int bits = __builtin_clz(x & 0x1FFFF) - 16;

	index1 = (x >> 8) << 1;
	/* RH ~ 2^56/index1 */
	RH = __RH_LH_tbl[index1 - 256];
	/* LH ~ 2^48 * log2(index1/256) */
	LH = __RH_LH_tbl[index1 + 1 - 256];

	/* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
	xl64 = (__s64)x * RH;
	result <<= (12 + 32);

	index2 = xl64 & 0xff;
	/* LL ~ 2^48*log2(1.0+index2/2^15) */
	LL = __LL_tbl[index2];

	LH >>= (48 - 12 - 32);
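
/*
 * Fixed-point sanity check of the formula in the comment above (worked by
 * hand, not taken from the source): crush_ln() targets 2^44 * log2(input+1),
 * so an input of 0 maps to 0, and an input of 0xffff maps to
 * 2^44 * log2(0x10000) = 16 * 2^44 = 0x1000000000000, which is exactly the
 * bias that bucket_straw2_choose() below subtracts to turn the result into a
 * non-positive "ln" value.
 */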
/*
 * for reference, see:
 *
 * https://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
 */
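
/*
 * Why that reference applies (an interpretive note, not from the source):
 * straw2 effectively draws, for each item i, a value ln(u_i)/w_i with u_i
 * uniform in (0,1] and w_i the item weight, and keeps the largest (least
 * negative) draw.  Scaling an exponential variate by 1/w_i makes item i win
 * with probability w_i / (w_1 + ... + w_n), which is the weighted
 * distribution the minimum-of-exponentials identity describes; see
 * bucket_straw2_choose() below for the fixed-point version.
 */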
static __u32 *get_choose_arg_weights(const struct crush_bucket_straw2 *bucket,
				     const struct crush_choose_arg *arg,
	if (!arg || !arg->weight_set)
		return bucket->item_weights;

	if (position >= arg->weight_set_size)
		position = arg->weight_set_size - 1;
	return arg->weight_set[position].weights;

static __s32 *get_choose_arg_ids(const struct crush_bucket_straw2 *bucket,
				 const struct crush_choose_arg *arg)
	if (!arg || !arg->ids)
		return bucket->h.items;
static int bucket_straw2_choose(const struct crush_bucket_straw2 *bucket,
				const struct crush_choose_arg *arg,
	unsigned int i, high = 0;
	__s64 ln, draw, high_draw = 0;
	__u32 *weights = get_choose_arg_weights(bucket, arg, position);
	__s32 *ids = get_choose_arg_ids(bucket, arg);

	for (i = 0; i < bucket->h.size; i++) {
		dprintk("weight 0x%x item %d\n", weights[i], ids[i]);
		u = crush_hash32_3(bucket->h.hash, x, ids[i], r);

		/*
		 * for some reason slightly less than 0x10000 produces
		 * a slightly more accurate distribution... probably a
		 *
		 * the natural log lookup table maps [0,0xffff]
		 * (corresponding to real numbers [1/0x10000, 1] to
		 * [0, 0xffffffffffff] (corresponding to real numbers
		 */
		ln = crush_ln(u) - 0x1000000000000ll;

		/*
		 * divide by 16.16 fixed-point weight.  note
		 * that the ln value is negative, so a larger
		 * weight means a larger (less negative) value
		 */
		draw = div64_s64(ln, weights[i]);

		if (i == 0 || draw > high_draw) {

	return bucket->h.items[high];
static int crush_bucket_choose(const struct crush_bucket *in,
			       struct crush_work_bucket *work,
			       const struct crush_choose_arg *arg,
	dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
	BUG_ON(in->size == 0);
	case CRUSH_BUCKET_UNIFORM:
		return bucket_uniform_choose(
			(const struct crush_bucket_uniform *)in,
	case CRUSH_BUCKET_LIST:
		return bucket_list_choose((const struct crush_bucket_list *)in,
	case CRUSH_BUCKET_TREE:
		return bucket_tree_choose((const struct crush_bucket_tree *)in,
	case CRUSH_BUCKET_STRAW:
		return bucket_straw_choose(
			(const struct crush_bucket_straw *)in,
	case CRUSH_BUCKET_STRAW2:
		return bucket_straw2_choose(
			(const struct crush_bucket_straw2 *)in,
			x, r, arg, position);
	dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
/*
 * true if device is marked "out" (failed, fully offloaded)
 */
static int is_out(const struct crush_map *map,
		  const __u32 *weight, int weight_max,
	if (item >= weight_max)
	if (weight[item] >= 0x10000)
	if (weight[item] == 0)
	if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)
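
/*
 * How to read the checks above (the return statements are elided in this
 * excerpt): a weight of 0x10000 is 16.16 fixed-point 1.0 and never rejects
 * the device, a weight of 0 always rejects it, and anything in between
 * rejects the device pseudo-randomly by comparing a 16-bit hash of (x, item)
 * against the weight, so the device keeps roughly weight/0x10000 of the
 * placements that reach it.
 */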
/*
 * crush_choose_firstn - choose numrep distinct items of given type
 * @map: the crush_map
 * @work: working space initialized by crush_init_workspace()
 * @bucket: the bucket we are choosing an item from
 * @weight: weight vector (for map leaves)
 * @weight_max: size of weight vector
 * @x: crush input value
 * @numrep: the number of items to choose
 * @type: the type of item to choose
 * @out: pointer to output vector
 * @outpos: our position in that vector
 * @out_size: size of the out vector
 * @tries: number of attempts to make
 * @recurse_tries: number of attempts to have recursive chooseleaf make
 * @local_retries: localized retries
 * @local_fallback_retries: localized fallback retries
 * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
 * @stable: stable mode starts rep=0 in the recursive call for all replicas
 * @vary_r: pass r to recursive calls
 * @out2: second output vector for leaf items (if @recurse_to_leaf)
 * @parent_r: r value passed from the parent
 * @choose_args: weights and ids for each known bucket
 */
static int crush_choose_firstn(const struct crush_map *map,
			       struct crush_work *work,
			       const struct crush_bucket *bucket,
			       const __u32 *weight, int weight_max,
			       int x, int numrep, int type,
			       int *out, int outpos,
			       unsigned int recurse_tries,
			       unsigned int local_retries,
			       unsigned int local_fallback_retries,
			       const struct crush_choose_arg *choose_args)
	unsigned int ftotal, flocal;
	int retry_descent, retry_bucket, skip_rep;
	const struct crush_bucket *in = bucket;
	int count = out_size;

	dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n",
		recurse_to_leaf ? "_LEAF" : "",
		bucket->id, x, outpos, numrep,
		tries, recurse_tries, local_retries, local_fallback_retries,

	for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) {
		/* keep trying until we get a non-out, non-colliding item */
			in = bucket;              /* initial bucket */

			/* choose through intervening buckets */
				/* r' = r + f_total */
				if (local_fallback_retries > 0 &&
				    flocal >= (in->size>>1) &&
				    flocal > local_fallback_retries)
					item = bucket_perm_choose(
						in, work->work[-1-in->id],
					item = crush_bucket_choose(
						in, work->work[-1-in->id],
						 &choose_args[-1-in->id] : NULL),
				if (item >= map->max_devices) {
					dprintk(" bad item %d\n", item);
					itemtype = map->buckets[-1-item]->type;
				dprintk(" item %d type %d\n", item, itemtype);

				if (itemtype != type) {
					    (-1-item) >= map->max_buckets) {
						dprintk(" bad item type %d\n", type);
					in = map->buckets[-1-item];

				for (i = 0; i < outpos; i++) {
					if (out[i] == item) {
				if (!collide && recurse_to_leaf) {
						sub_r = r >> (vary_r-1);
					if (crush_choose_firstn(
						    map->buckets[-1-item],
						    x, stable ? 1 : outpos+1, 0,
						    local_fallback_retries,
						    choose_args) <= outpos)
						/* didn't get leaf */
					/* we already have a leaf! */

				if (!reject && !collide) {
					reject = is_out(map, weight,

				if (reject || collide) {
					if (collide && flocal <= local_retries)
						/* retry locally a few times */
					else if (local_fallback_retries > 0 &&
						 flocal <= in->size + local_fallback_retries)
						/* exhaustive bucket search */
					else if (ftotal < tries)
						/* then retry descent */

					dprintk(" reject %d collide %d "
						"ftotal %u flocal %u\n",
						reject, collide, ftotal,
			} while (retry_bucket);
		} while (retry_descent);

			dprintk("skip rep\n");

		dprintk("CHOOSE got %d\n", item);

	if (map->choose_tries && ftotal <= map->choose_total_tries)
		map->choose_tries[ftotal]++;

	dprintk("CHOOSE returns %d\n", outpos);
/*
 * crush_choose_indep: alternative breadth-first positionally stable mapping
 */
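
/*
 * Contrast with crush_choose_firstn() above (a summary inferred from the two
 * loops rather than stated in the source): firstn appends successful picks
 * one after another and simply returns fewer items when it gives up, while
 * the indep variant below reserves one output slot per replica position,
 * initializes each slot to CRUSH_ITEM_UNDEF, and leaves CRUSH_ITEM_NONE in
 * any position it could not fill, so surviving replicas keep their position
 * in the result, which is the property erasure-coded placements rely on.
 */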
static void crush_choose_indep(const struct crush_map *map,
			       struct crush_work *work,
			       const struct crush_bucket *bucket,
			       const __u32 *weight, int weight_max,
			       int x, int left, int numrep, int type,
			       int *out, int outpos,
			       unsigned int recurse_tries,
			       const struct crush_choose_arg *choose_args)
	const struct crush_bucket *in = bucket;
	int endpos = outpos + left;

	dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
		bucket->id, x, outpos, numrep);

	/* initially my result is undefined */
	for (rep = outpos; rep < endpos; rep++) {
		out[rep] = CRUSH_ITEM_UNDEF;
			out2[rep] = CRUSH_ITEM_UNDEF;

	for (ftotal = 0; left > 0 && ftotal < tries; ftotal++) {
		if (out2 && ftotal) {
			dprintk("%u %d a: ", ftotal, left);
			for (rep = outpos; rep < endpos; rep++) {
				dprintk(" %d", out[rep]);
			dprintk("%u %d b: ", ftotal, left);
			for (rep = outpos; rep < endpos; rep++) {
				dprintk(" %d", out2[rep]);

		for (rep = outpos; rep < endpos; rep++) {
			if (out[rep] != CRUSH_ITEM_UNDEF)

			in = bucket;              /* initial bucket */

			/* choose through intervening buckets */
				/* note: we base the choice on the position
				 * even in the nested call.  that means that
				 * if the first layer chooses the same bucket
				 * in a different position, we will tend to
				 * choose a different item in that bucket.
				 * this will involve more devices in data
				 * movement and tend to distribute the load.
				 */
				if (in->alg == CRUSH_BUCKET_UNIFORM &&
				    in->size % numrep == 0)
					/* r'=r+(n+1)*f_total */
					r += (numrep+1) * ftotal;
					/* r' = r + n*f_total */
					r += numrep * ftotal;

					dprintk(" empty bucket\n");

				item = crush_bucket_choose(
					in, work->work[-1-in->id],
					 &choose_args[-1-in->id] : NULL),
				if (item >= map->max_devices) {
					dprintk(" bad item %d\n", item);
					out[rep] = CRUSH_ITEM_NONE;
						out2[rep] = CRUSH_ITEM_NONE;

					itemtype = map->buckets[-1-item]->type;
				dprintk(" item %d type %d\n", item, itemtype);

				if (itemtype != type) {
					    (-1-item) >= map->max_buckets) {
						dprintk(" bad item type %d\n", type);
						out[rep] = CRUSH_ITEM_NONE;
					in = map->buckets[-1-item];

				for (i = outpos; i < endpos; i++) {
					if (out[i] == item) {

				if (recurse_to_leaf) {
						map->buckets[-1-item],
					if (out2[rep] == CRUSH_ITEM_NONE) {
						/* placed nothing; no leaf */
					/* we already have a leaf! */

				    is_out(map, weight, weight_max, item, x))

	for (rep = outpos; rep < endpos; rep++) {
		if (out[rep] == CRUSH_ITEM_UNDEF) {
			out[rep] = CRUSH_ITEM_NONE;
		if (out2 && out2[rep] == CRUSH_ITEM_UNDEF) {
			out2[rep] = CRUSH_ITEM_NONE;

	if (map->choose_tries && ftotal <= map->choose_total_tries)
		map->choose_tries[ftotal]++;

	dprintk("%u %d a: ", ftotal, left);
	for (rep = outpos; rep < endpos; rep++) {
		dprintk(" %d", out[rep]);
	dprintk("%u %d b: ", ftotal, left);
	for (rep = outpos; rep < endpos; rep++) {
		dprintk(" %d", out2[rep]);
/*
 * This takes a chunk of memory and sets it up to be a shiny new
 * working area for a CRUSH placement computation. It must be called
 * on any newly allocated memory before passing it in to
 * crush_do_rule. It may be used repeatedly after that, so long as the
 * map has not changed. If the map /has/ changed, you must make sure
 * the working size is no smaller than what was allocated and re-run
 * crush_init_workspace.
 *
 * If you do retain the working space between calls to crush, make it
 */
void crush_init_workspace(const struct crush_map *map, void *v)
	struct crush_work *w = v;

	/*
	 * We work by moving through the available space and setting
	 * values and pointers as we go.
	 *
	 * It's a bit like Forth's use of the 'allot' word since we
	 * set the pointer first and then reserve the space for it to
	 * point to by incrementing the point.
	 */
	v += sizeof(struct crush_work);
	v += map->max_buckets * sizeof(struct crush_work_bucket *);
	for (b = 0; b < map->max_buckets; ++b) {
		if (!map->buckets[b])
		switch (map->buckets[b]->alg) {
			v += sizeof(struct crush_work_bucket);
		w->work[b]->perm_x = 0;
		w->work[b]->perm_n = 0;
		w->work[b]->perm = v;
		v += map->buckets[b]->size * sizeof(__u32);
	BUG_ON(v - (void *)w != map->working_size);
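
/*
 * Resulting layout, pieced together as a sketch from the pointer arithmetic
 * above (field order inside each region belongs to the structs themselves
 * and is not shown):
 *
 *	[struct crush_work]
 *	[array of max_buckets struct crush_work_bucket * pointers]
 *	for each non-empty bucket b:
 *		[struct crush_work_bucket for b]
 *		[__u32 perm[map->buckets[b]->size]]
 *
 * which is why the final BUG_ON can assert that the cursor has advanced by
 * exactly map->working_size bytes.
 */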
/*
 * crush_do_rule - calculate a mapping with the given input and rule
 * @map: the crush_map
 * @ruleno: the rule id
 * @result: pointer to result vector
 * @result_max: maximum result size
 * @weight: weight vector (for map leaves)
 * @weight_max: size of weight vector
 * @cwin: pointer to at least crush_work_size() bytes of memory
 * @choose_args: weights and ids for each known bucket
 */
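
/*
 * A minimal calling sketch (caller-side, illustrative only; REPLICAS is a
 * made-up constant, and the buffer sizing assumes the crush_work_size()
 * helper referenced in the @cwin note above takes the map and the result
 * size; error handling omitted):
 *
 *	int result[REPLICAS];
 *	void *cwin = malloc(crush_work_size(map, REPLICAS));
 *
 *	crush_init_workspace(map, cwin);
 *	n = crush_do_rule(map, ruleno, x, result, REPLICAS,
 *			  weight, weight_max, cwin, NULL);
 *
 * The return value is the number of entries written to @result, and the same
 * workspace may be reused across calls as long as the map does not change
 * (see crush_init_workspace() above).
 */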
int crush_do_rule(const struct crush_map *map,
		  int ruleno, int x, int *result, int result_max,
		  const __u32 *weight, int weight_max,
		  void *cwin, const struct crush_choose_arg *choose_args)
	struct crush_work *cw = cwin;
	int *a = cwin + map->working_size;
	int *b = a + result_max;
	int *c = b + result_max;
	const struct crush_rule *rule;

	/*
	 * the original choose_total_tries value was off by one (it
	 * counted "retries" and not "tries"). add one.
	 */
	int choose_tries = map->choose_total_tries + 1;
	int choose_leaf_tries = 0;
	/*
	 * the local tries values were counted as "retries", though,
	 * and need no adjustment
	 */
	int choose_local_retries = map->choose_local_tries;
	int choose_local_fallback_retries = map->choose_local_fallback_tries;

	int vary_r = map->chooseleaf_vary_r;
	int stable = map->chooseleaf_stable;

	if ((__u32)ruleno >= map->max_rules) {
		dprintk(" bad ruleno %d\n", ruleno);

	rule = map->rules[ruleno];

	for (step = 0; step < rule->len; step++) {
		const struct crush_rule_step *curstep = &rule->steps[step];

		switch (curstep->op) {
		case CRUSH_RULE_TAKE:
			if ((curstep->arg1 >= 0 &&
			     curstep->arg1 < map->max_devices) ||
			    (-1-curstep->arg1 >= 0 &&
			     -1-curstep->arg1 < map->max_buckets &&
			     map->buckets[-1-curstep->arg1])) {
				w[0] = curstep->arg1;
				dprintk(" bad take value %d\n", curstep->arg1);

		case CRUSH_RULE_SET_CHOOSE_TRIES:
			if (curstep->arg1 > 0)
				choose_tries = curstep->arg1;

		case CRUSH_RULE_SET_CHOOSELEAF_TRIES:
			if (curstep->arg1 > 0)
				choose_leaf_tries = curstep->arg1;

		case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES:
			if (curstep->arg1 >= 0)
				choose_local_retries = curstep->arg1;

		case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES:
			if (curstep->arg1 >= 0)
				choose_local_fallback_retries = curstep->arg1;

		case CRUSH_RULE_SET_CHOOSELEAF_VARY_R:
			if (curstep->arg1 >= 0)
				vary_r = curstep->arg1;

		case CRUSH_RULE_SET_CHOOSELEAF_STABLE:
			if (curstep->arg1 >= 0)
				stable = curstep->arg1;

		case CRUSH_RULE_CHOOSELEAF_FIRSTN:
		case CRUSH_RULE_CHOOSE_FIRSTN:
		case CRUSH_RULE_CHOOSELEAF_INDEP:
		case CRUSH_RULE_CHOOSE_INDEP:
				CRUSH_RULE_CHOOSELEAF_FIRSTN ||
				CRUSH_RULE_CHOOSELEAF_INDEP;

			for (i = 0; i < wsize; i++) {
				numrep = curstep->arg1;
					numrep += result_max;

				/* make sure bucket id is valid */
				if (bno < 0 || bno >= map->max_buckets) {
					/* w[i] is probably CRUSH_ITEM_NONE */
					dprintk(" bad w[i] %d\n", w[i]);

					if (choose_leaf_tries)
					else if (map->chooseleaf_descend_once)
						recurse_tries = choose_tries;
					osize += crush_choose_firstn(
						choose_local_retries,
						choose_local_fallback_retries,
					out_size = ((numrep < (result_max-osize)) ?
						    numrep : (result_max-osize));
						x, out_size, numrep,
						choose_leaf_tries : 1,

			if (recurse_to_leaf)
				/* copy final _leaf_ values to output set */
				memcpy(o, c, osize*sizeof(*o));

			/* swap o and w arrays */

		case CRUSH_RULE_EMIT:
			for (i = 0; i < wsize && result_len < result_max; i++) {
				result[result_len] = w[i];

			dprintk(" unknown op %d at step %d\n",