/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BTREE_INTERNAL_H
#define DM_BTREE_INTERNAL_H

#include "dm-btree.h"

/*----------------------------------------------------------------*/

/*
 * We'll need 2 accessor functions for n->csum and n->blocknr
 * to support dm-btree-spine.c in that case.
 */

enum node_flags {
	INTERNAL_NODE = 1,
	LEAF_NODE = 1 << 1
};

/*
 * Every btree node begins with this structure. Make sure it's a multiple
 * of 8-bytes in size, otherwise the 64bit keys will be mis-aligned.
 */
struct node_header {
	__le32 csum;
	__le32 flags;
	__le64 blocknr; /* Block this node is supposed to live in. */

	__le32 nr_entries;
	__le32 max_entries;
	__le32 value_size;
	__le32 padding;
} __packed;
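
/*
 * Illustrative sketch, not part of the original header: a pre-C11
 * compile-time check that struct node_header stays a multiple of 8 bytes,
 * as the comment above requires, so the 64-bit keys that follow remain
 * aligned.  The typedef name is invented for this example.
 */
typedef char node_header_size_is_multiple_of_8
	[(sizeof(struct node_header) % 8) ? -1 : 1];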

struct node {
	struct node_header header;
	__le64 keys[0];
} __packed;

void inc_children(struct dm_transaction_manager *tm, struct node *n,
		  struct dm_btree_value_type *vt);

int new_block(struct dm_btree_info *info, struct dm_block **result);
int unlock_block(struct dm_btree_info *info, struct dm_block *b);

/*
 * Spines keep track of the rolling locks. There are 2 variants, read-only
 * and one that uses shadowing. These are separate structs to allow the
 * type checker to spot misuse, for example accidentally calling read_lock
 * on a shadow spine.
 */
struct ro_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];
};

void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
struct node *ro_node(struct ro_spine *s);
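
/*
 * Illustrative sketch, not part of the original header: how the read-only
 * spine is typically driven, holding at most two blocks at a time while
 * walking from the root towards a leaf.  The function name and the
 * find_child() callback are invented for this example; the real walk
 * lives in dm-btree.c.
 */
static inline int example_ro_walk(struct dm_btree_info *info, dm_block_t root,
				  uint64_t key,
				  dm_block_t (*find_child)(struct node *, uint64_t))
{
	struct ro_spine spine;
	dm_block_t block = root;
	int r = 0;

	init_ro_spine(&spine, info);
	for (;;) {
		r = ro_step(&spine, block);	/* lock next node, drop the oldest held one */
		if (r < 0)
			break;

		if (le32_to_cpu(ro_node(&spine)->header.flags) & LEAF_NODE)
			break;			/* reached a leaf */

		block = find_child(ro_node(&spine), key);
	}
	exit_ro_spine(&spine);			/* drop any remaining locks */

	return r;
}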

struct shadow_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];

	dm_block_t root;
};

void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
int exit_shadow_spine(struct shadow_spine *s);

int shadow_step(struct shadow_spine *s, dm_block_t b,
		struct dm_btree_value_type *vt);

/*
 * The spine must have at least one entry before calling this.
 */
struct dm_block *shadow_current(struct shadow_spine *s);

/*
 * The spine must have at least two entries before calling this.
 */
struct dm_block *shadow_parent(struct shadow_spine *s);

int shadow_has_parent(struct shadow_spine *s);

int shadow_root(struct shadow_spine *s);
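
/*
 * Illustrative sketch, not part of the original header: a shadow spine in
 * use.  Each shadow_step() shadows the next block so the copy can be
 * modified, while the spine keeps the parent locked; the caller is assumed
 * to have called init_shadow_spine() and to call exit_shadow_spine()
 * afterwards.  The function name and the pick_child() callback are
 * invented, and dm_block_data() from dm-block-manager.h is assumed to be
 * visible here; the real insert path is in dm-btree.c.
 */
static inline int example_shadow_walk(struct shadow_spine *s, dm_block_t root,
				      struct dm_btree_value_type *vt, uint64_t key,
				      dm_block_t (*pick_child)(struct node *, uint64_t))
{
	dm_block_t block = root;
	struct node *n;
	int r;

	for (;;) {
		r = shadow_step(s, block, vt);	/* copy-on-write the next node */
		if (r < 0)
			return r;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return 0;		/* leaf reached */

		block = pick_child(n, key);
	}
}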

/*
 * Some inlines.
 */
static inline __le64 *key_ptr(struct node *n, uint32_t index)
{
	return n->keys + index;
}

static inline void *value_base(struct node *n)
{
	return &n->keys[le32_to_cpu(n->header.max_entries)];
}
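
/*
 * Illustrative sketch, not part of the original header: as value_base()
 * shows, a node is laid out as | header | keys[max_entries] | values[] |,
 * so the number of entries a block can hold is roughly the figure below.
 * The function name is invented and this is only an approximation; the
 * real sizing logic lives in dm-btree.c.
 */
static inline uint32_t example_max_entries(size_t value_size, size_t block_size)
{
	size_t elt_size = sizeof(__le64) + value_size;	/* one key + one value */

	return (block_size - sizeof(struct node_header)) / elt_size;
}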

/*
 * FIXME: Now that value size is stored in node we don't need the third parm.
 */
static inline void *value_ptr(struct node *n, uint32_t index, size_t value_size)
{
	BUG_ON(value_size != le32_to_cpu(n->header.value_size));
	return value_base(n) + (value_size * index);
}

/*
 * Assumes the values are suitably-aligned and converts to core format.
 */
static inline uint64_t value64(struct node *n, uint32_t index)
{
	__le64 *values_le = value_base(n);

	return le64_to_cpu(values_le[index]);
}

/*
 * Searching for a key within a single node.
 */
int lower_bound(struct node *n, uint64_t key);
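
/*
 * Illustrative sketch, not part of the original header: combining
 * lower_bound() with value64() to look a key up in a node holding 64-bit
 * values.  This assumes lower_bound() returns the index of the largest key
 * <= the wanted key (negative if there is none) and that <linux/errno.h>
 * is reachable here; the function name is invented.
 */
static inline int example_find64(struct node *n, uint64_t key, uint64_t *result)
{
	int i = lower_bound(n, key);

	if (i < 0 || le64_to_cpu(n->keys[i]) != key)
		return -ENODATA;	/* no exact match in this node */

	*result = value64(n, (uint32_t) i);
	return 0;
}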

extern struct dm_block_validator btree_node_validator;

#endif	/* DM_BTREE_INTERNAL_H */