/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "locking.h"

/* defrag all the leaves in a given btree.  If cache_only == 1, don't read
 * things from disk, otherwise read all the leaves and try to get key order to
 * better reflect disk order
 */

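/*
 * Progress is tracked in root->defrag_progress.  A return value of
 * -EAGAIN means more of the tree remains to be defragged; a later call
 * resumes from the saved key instead of starting over.
 */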
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, int cache_only)
{
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	int ret = 0;
	int wret;
	int level;
	int orig_level;
	int is_extent = 0;
	int next_key_ret = 0;
	u64 last_ret = 0;
	u64 min_trans = 0;

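	/*
	 * cache_only defrag is not done here: when it is requested we
	 * return immediately without touching the tree
	 */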
	if (cache_only)
		goto out;

	if (root->fs_info->extent_root == root) {
		/*
		 * there's recursion here right now in the tree locking,
		 * we can't defrag the extent root without deadlock
		 */
		goto out;
	}

	if (root->ref_cows == 0 && !is_extent)
		goto out;

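	/*
	 * with the ssd mount option set, skip the defrag; reordering
	 * leaves for disk locality buys little on flash
	 */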
	if (btrfs_test_opt(root, SSD))
		goto out;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(root->node);
	orig_level = level;

	if (level == 0)
		goto out;

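	/*
	 * on a fresh pass, record the highest key in the root node in
	 * root->defrag_max so the cleanup code below can tell when the
	 * whole tree has been covered; otherwise resume from the key
	 * saved in root->defrag_progress
	 */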
	if (root->defrag_progress.objectid == 0) {
		struct extent_buffer *root_node;
		u32 nritems;

		root_node = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(root_node);
		nritems = btrfs_header_nritems(root_node);
		root->defrag_max.objectid = 0;
		/* from above we know this is not a leaf */
		btrfs_node_key_to_cpu(root_node, &root->defrag_max,
				      nritems - 1);
		btrfs_tree_unlock(root_node);
		free_extent_buffer(root_node);
		memset(&key, 0, sizeof(key));
	} else {
		memcpy(&key, &root->defrag_progress, sizeof(key));
	}

	path->keep_locks = 1;
	if (cache_only)
		min_trans = root->defrag_trans_start;

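	/*
	 * find the next leaf at or beyond the resume key, then drop the
	 * read-only path and search again with COW enabled so the blocks
	 * along the way can be rewritten
	 */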
	ret = btrfs_search_forward(root, &key, NULL, path,
				   cache_only, min_trans);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	btrfs_release_path(root, path);
	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	if (wret < 0) {
		ret = wret;
		goto out;
	}
	if (!path->nodes[1]) {
		ret = 0;
		goto out;
	}
	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
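	/*
	 * note the next key so we know where to resume, then reallocate
	 * the children of this node so their key order better reflects
	 * disk order
	 */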
	next_key_ret = btrfs_find_next_key(root, path, &key, 1, cache_only,
					   min_trans);
	ret = btrfs_realloc_node(trans, root,
				 path->nodes[1], 0,
				 cache_only, &last_ret,
				 &root->defrag_progress);
	WARN_ON(ret && ret != -EAGAIN);
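	/*
	 * if there is another key after this node, save it as the resume
	 * point and tell the caller to come back for another pass
	 */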
	if (next_key_ret == 0) {
		memcpy(&root->defrag_progress, &key, sizeof(key));
		ret = -EAGAIN;
	}

	btrfs_release_path(root, path);
out:
	if (path)
		btrfs_free_path(path);
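	/*
	 * if we hit -EAGAIN but the saved progress is already past the
	 * last key recorded in defrag_max, the whole tree has been
	 * walked and we can report success instead
	 */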
	if (ret == -EAGAIN) {
		if (root->defrag_max.objectid > root->defrag_progress.objectid)
			goto done;
		if (root->defrag_max.type > root->defrag_progress.type)
			goto done;
		if (root->defrag_max.offset > root->defrag_progress.offset)
			goto done;
		ret = 0;
	}
done:
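	/*
	 * unless we are coming back for another pass, clear the saved
	 * progress and note the transaction this defrag ran in
	 */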
	if (ret != -EAGAIN) {
		memset(&root->defrag_progress, 0,
		       sizeof(root->defrag_progress));
		root->defrag_trans_start = trans->transid;
	}
	return ret;
}