#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}
void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
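
/*
 * Editor's sketch (not part of the original file): how a user of this
 * interface typically embeds and initializes a tree.  btrfs does something
 * similar for each in-memory inode; the container struct and init helper
 * below are illustrative assumptions, only extent_map_tree_init() is real.
 *
 *	struct my_inode {				// hypothetical container
 *		struct extent_map_tree extent_tree;
 *	};
 *
 *	static void my_inode_init(struct my_inode *inode)
 *	{
 *		extent_map_tree_init(&inode->extent_tree);
 *	}
 */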
/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}
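
/*
 * Editor's sketch (not part of the original file): the expected allocation
 * and refcount discipline.  A caller allocates a map, fills it in, and drops
 * its own reference with free_extent_map() once done; the map is destroyed
 * only when the last reference goes away.  The field values below are
 * illustrative assumptions, not copied from a real caller.
 *
 *	struct extent_map *em;
 *
 *	em = alloc_extent_map();		// refcount == 1
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = 4096;
 *	em->block_start = EXTENT_MAP_HOLE;	// describe a hole, no disk blocks
 *	em->block_len = 4096;
 *
 *	free_extent_map(em);			// drop the caller's reference
 */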
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
static int tree_insert(struct rb_root *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start)
			p = &(*p)->rb_left;
		else if (em->start >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color(&em->rb_node, root);
	return 0;
}
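
/*
 * Editor's note (not part of the original file): tree_insert() refuses to
 * insert a map whose byte range overlaps an existing one.  For example, with
 * an existing map covering [0, 8192) (start 0, len 8192), inserting a map
 * with start 4096 and len 8192 fails with -EEXIST; the caller must drop or
 * split the conflicting mapping first.
 */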
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
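
/*
 * Editor's note (not part of the original file): a concrete case of the last
 * condition above.  With prev covering bytes [0, 4096), backed by disk bytenr
 * 1048576 with block_len 4096 (so extent_map_block_end(prev) == 1052672), and
 * next covering [4096, 8192) with block_start == 1052672, the two maps are
 * byte- and block-contiguous and, flags permitting, may be merged into one
 * map of len 8192.  The numbers are illustrative only.
 */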
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}
/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Sets the
 * generation to the one that actually added the file item to the inode so we
 * know we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}
static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	atomic_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether @em should be linked onto the list of modified
 *		extents instead of being merged with its neighbours
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
out:
	return ret;
}
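
/*
 * Editor's sketch (not part of the original file): the usual insertion
 * pattern.  The tree is protected by its rwlock, so callers take the write
 * lock around add_extent_mapping() and handle -EEXIST if the range is
 * already mapped.  The error handling shown is an illustrative assumption,
 * not a copy of a real caller.
 *
 *	int ret;
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);	// 0: allow merging, no fsync tracking
 *	write_unlock(&tree->lock);
 *	if (ret == -EEXIST) {
 *		// an overlapping map is already cached; fall back to a lookup
 *	}
 *	free_extent_map(em);	// drop the caller's reference; the tree holds its own
 */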
static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
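
/*
 * Editor's sketch (not part of the original file): a typical lookup.  The
 * read lock protects the rbtree walk; the returned map carries an extra
 * reference that the caller must drop with free_extent_map().  Variable
 * names are illustrative assumptions.
 *
 *	struct extent_map *em;
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		// use em->block_start, em->len, ... for the cached range
 *		free_extent_map(em);
 *	}
 */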
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	RB_CLEAR_NODE(&em->rb_node);
	return ret;
}
void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}