x86/PCI: Use generic cacheline sizing instead of per-vendor tests.
[linux-2.6/next.git] / fs / btrfs / extent_map.h
blob: ab6d74b6e6477dcfb1bf65df494c5749cc8d80d9
#ifndef __EXTENTMAP__
#define __EXTENTMAP__

#include <linux/rbtree.h>

/*
 * Sentinel values sitting at the very top of the u64 range, so they can
 * never collide with a real on-disk byte offset.  NOTE(review): presumably
 * stored in extent_map->block_start for ranges with no ordinary disk
 * location (holes, inline data, delalloc) — confirm against extent_map.c.
 */
#define EXTENT_MAP_LAST_BYTE (u64)-4
#define EXTENT_MAP_HOLE (u64)-3
#define EXTENT_MAP_INLINE (u64)-2
#define EXTENT_MAP_DELALLOC (u64)-1

/* bits for the flags field */
#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
#define EXTENT_FLAG_COMPRESSED 1 /* presumably: extent data is compressed — confirm in callers */
#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
17 struct extent_map {
18 struct rb_node rb_node;
20 /* all of these are in bytes */
21 u64 start;
22 u64 len;
23 u64 orig_start;
24 u64 block_start;
25 u64 block_len;
26 unsigned long flags;
27 struct block_device *bdev;
28 atomic_t refs;
29 int in_tree;
32 struct extent_map_tree {
33 struct rb_root map;
34 rwlock_t lock;
37 static inline u64 extent_map_end(struct extent_map *em)
39 if (em->start + em->len < em->start)
40 return (u64)-1;
41 return em->start + em->len;
44 static inline u64 extent_map_block_end(struct extent_map *em)
46 if (em->block_start + em->block_len < em->block_start)
47 return (u64)-1;
48 return em->block_start + em->block_len;
/* Initialize @tree to empty; @mask is a gfp allocation mask — whether
 * anything is actually allocated here is defined in extent_map.c. */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
/* Look up a mapping overlapping [start, start + len); exact hit/miss
 * semantics and returned reference ownership defined in extent_map.c. */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
/* Insert @em into @tree; returns 0 or a negative errno-style code —
 * confirm the error convention in extent_map.c. */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em);
/* Remove @em from @tree; return-value convention defined in extent_map.c. */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
/* Allocate a new extent_map with @mask; NULL on failure — confirm. */
struct extent_map *alloc_extent_map(gfp_t mask);
/* Drop a reference on @em (see refs field); frees it when the last
 * reference goes away — confirm in extent_map.c. */
void free_extent_map(struct extent_map *em);
/* Module-wide setup/teardown (e.g. slab cache) — see extent_map.c. */
int __init extent_map_init(void);
void extent_map_exit(void);
/* Clear EXTENT_FLAG_PINNED on the mapping covering [start, start + len) —
 * presumably; verify against the implementation. */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len);
/* Like lookup_extent_mapping but with relaxed matching — exact difference
 * defined in extent_map.c. */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
#endif