// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Oracle. All rights reserved.
 */

#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
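
/*
 * Empty the test tree: remove every extent map and drop the tree's reference
 * to it.  Under CONFIG_BTRFS_DEBUG, also report any extent map that is not
 * down to a single reference (i.e. a reference leaked by a test) and reset
 * its refcount so it still gets freed.
 */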
static void free_extent_map_tree(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	struct rb_node *node;

	while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
		node = rb_first_cached(&em_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		remove_extent_mapping(em_tree, em);

#ifdef CONFIG_BTRFS_DEBUG
		if (refcount_read(&em->refs) != 1) {
			test_err(
"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d",
				 em->start, em->len, em->block_start,
				 em->block_len, refcount_read(&em->refs));

			refcount_set(&em->refs, 1);
		}
#endif
		free_extent_map(em);
	}
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet. There is a file
 * extent [0, 16K) followed by another file extent [16K, 20K); two dio reads
 * enter btrfs_get_extent() concurrently, t1 reading [8K, 16K) and t2 reading
 * [0, 8K).
 *
 *     t1                               t2
 *  btrfs_get_extent()              btrfs_get_extent()
 *   -> lookup_extent_mapping()      -> lookup_extent_mapping()
 *   -> add_extent_mapping(0, 16K)
 *   -> return em
 *                                   -> add_extent_mapping(0, 16K)
 *                                   -> #handle -EEXIST
 */
static void test_case_1(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	u64 start = 0;
	u64 len = SZ_8K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip the test on error. */
		return;

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	/* Add [16K, 20K) following [0, 16K) */
	em = alloc_extent_map();
	if (!em)
		goto out;

	em->start = SZ_16K;
	em->len = SZ_4K;
	em->block_start = SZ_32K; /* avoid merging */
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 8K), should return [0, 16K) instead. */
	em->start = start;
	em->len = len;
	em->block_start = start;
	em->block_len = len;
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	if (ret)
		test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_16K ||
	     em->block_start != 0 || em->block_len != SZ_16K))
		test_err(
"case1 [%llu %llu]: ret %d returned a wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

/*
 * Test scenario:
 *
 * Reading an inline extent ends up with EEXIST, i.e. read an inline
 * extent, drop the page cache, and read it again.
 */
static void test_case_2(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip the test on error. */
		return;

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	/* Add [4K, 8K) following [0, 1K) */
	em = alloc_extent_map();
	if (!em)
		goto out;

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	if (ret)
		test_err("case2 [0 1K]: ret %d", ret);
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_1K ||
	     em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1))
		test_err(
"case2 [0 1K]: ret %d returned a wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 ret, em->start, em->len, em->block_start,
			 em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}
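
/*
 * Helper for test case 3 below: with [4K, 8K) already cached, try to add
 * [0, 16K) while asking btrfs_add_extent_mapping() for the [start, start + 4K)
 * range, and verify that the extent map we get back covers that range and is
 * contiguous (em->start == em->block_start, em->len == em->block_len).
 */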
static void __test_case_3(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip this test on error. */
		return;

	/* Add [4K, 8K) */
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	if (ret)
		test_err("case3 [0x%llx 0x%llx): ret %d",
			 start, start + len, ret);
	/*
	 * Since bytes within em are contiguous, em->block_start is identical to
	 * em->start.
	 */
	if (em &&
	    (start < em->start || start + len > extent_map_end(em) ||
	     em->start != em->block_start || em->len != em->block_len))
		test_err(
"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 16K); two jobs are running concurrently
 * against it, t1 doing a buffered write to [4K, 8K) and t2 doing a dio
 * read from [0, 4K), [8K, 12K) or [12K, 16K).
 *
 * t1 goes ahead of t2 and adds em [4K, 8K) into the tree.
 *
 *         t1                       t2
 *  cow_file_range()         btrfs_get_extent()
 *                            -> lookup_extent_mapping()
 *   -> add_extent_mapping()
 *                            -> add_extent_mapping()
 */
static void test_case_3(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	__test_case_3(fs_info, em_tree, 0);
	__test_case_3(fs_info, em_tree, SZ_8K);
	__test_case_3(fs_info, em_tree, (12 * 1024ULL));
}
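
/*
 * Helper for test case 4 below: with [0, 8K) and [8K, 32K) already cached,
 * try to add [0, 32K) while asking btrfs_add_extent_mapping() for the
 * [start, start + 4K) range, and verify that the returned extent map covers
 * the requested range.
 */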
static void __test_case_4(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip this test on error. */
		return;

	/* Add [0K, 8K) */
	em->start = 0;
	em->len = SZ_8K;
	em->block_start = 0;
	em->block_len = SZ_8K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [8K, 32K) */
	em->start = SZ_8K;
	em->len = 24 * 1024ULL;
	em->block_start = SZ_16K; /* avoid merging */
	em->block_len = 24 * 1024ULL;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;
	/* Add [0K, 32K) */
	em->start = 0;
	em->len = SZ_32K;
	em->block_start = 0;
	em->block_len = SZ_32K;
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	if (ret)
		test_err("case4 [0x%llx 0x%llx): ret %d",
			 start, start + len, ret);
	if (em &&
	    (start < em->start || start + len > extent_map_end(em)))
		test_err(
"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 32K); two jobs are running concurrently
 * against it, t1 doing a dio write to [8K, 32K) and t2 doing a dio
 * read from [0, 4K) or [4K, 8K).
 *
 * t1 goes ahead of t2 and splits em [0, 32K) into em [0K, 8K) and [8K, 32K).
 *
 *         t1                                t2
 *  btrfs_get_blocks_direct()          btrfs_get_blocks_direct()
 *   -> btrfs_get_extent()              -> btrfs_get_extent()
 *       -> lookup_extent_mapping()
 *       -> add_extent_mapping()            -> lookup_extent_mapping()
 *          # load [0, 32K)
 *   -> btrfs_new_extent_direct()
 *       -> btrfs_drop_extent_cache()
 *          # split [0, 32K)
 *       -> add_extent_mapping()
 *          # add [8K, 32K)
 *                                          -> add_extent_mapping()
 *                                             # handle -EEXIST when adding
 *                                             # [0, 32K)
 */
static void test_case_4(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	__test_case_4(fs_info, em_tree, 0);
	__test_case_4(fs_info, em_tree, SZ_4K);
}
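
/*
 * Entry point for the extent map tests.  In the kernel proper this is
 * presumably invoked from btrfs_run_sanity_tests() when
 * CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled; here it only relies on the
 * dummy fs_info helpers from btrfs-tests.h.
 */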
int btrfs_test_extent_map(void)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct extent_map_tree *em_tree;

	test_msg("running extent_map tests");

	/*
	 * Note: the fs_info is not set up completely, we only need
	 * fs_info::fsid for the tracepoint.
	 */
	fs_info = btrfs_alloc_dummy_fs_info(PAGE_SIZE, PAGE_SIZE);
	if (!fs_info) {
		test_msg("Couldn't allocate dummy fs info");
		return -ENOMEM;
	}

	em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
	if (!em_tree)
		/* Skip the test on error. */
		goto out;

	extent_map_tree_init(em_tree);

	test_case_1(fs_info, em_tree);
	test_case_2(fs_info, em_tree);
	test_case_3(fs_info, em_tree);
	test_case_4(fs_info, em_tree);

	kfree(em_tree);
out:
	btrfs_free_dummy_fs_info(fs_info);

	return 0;
}