// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STRATO AG 2013. All rights reserved.
 */

#include <linux/uuid.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "print-tree.h"
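
/*
 * UUID tree keys are built by splitting the 16-byte UUID into two
 * little-endian 64-bit halves: the first half becomes the key objectid,
 * the second half the key offset. The caller-supplied type (e.g.
 * BTRFS_UUID_KEY_SUBVOL or BTRFS_UUID_KEY_RECEIVED_SUBVOL) becomes the
 * key type.
 */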
static void btrfs_uuid_to_key(u8 *uuid, u8 type, struct btrfs_key *key)
{
	key->type = type;
	key->objectid = get_unaligned_le64(uuid);
	key->offset = get_unaligned_le64(uuid + sizeof(u64));
}

/* return -ENOENT for !found, < 0 for errors, or 0 if an item was found */
static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
				  u8 type, u64 subid)
{
	int ret;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	unsigned long offset;
	struct btrfs_key key;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -ENOENT;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);
	ret = btrfs_search_slot(NULL, uuid_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	offset = btrfs_item_ptr_offset(eb, slot);
	ret = -ENOENT;

	if (!IS_ALIGNED(item_size, sizeof(u64))) {
		btrfs_warn(uuid_root->fs_info,
			   "uuid item with illegal size %lu!",
			   (unsigned long)item_size);
		goto out;
	}

	/* Scan the array of little-endian subids stored in the item. */
	while (item_size) {
		__le64 data;

		read_extent_buffer(eb, &data, offset, sizeof(data));
		if (le64_to_cpu(data) == subid) {
			ret = 0;
			break;
		}
		offset += sizeof(data);
		item_size -= sizeof(data);
	}

out:
	btrfs_free_path(path);
	return ret;
}
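
/*
 * Store an additional subvolume id for the given (uuid, type) pair.
 *
 * If no uuid item exists for that key yet, a new item holding a single
 * little-endian u64 subid is inserted; otherwise the existing item is
 * extended by sizeof(u64) and the new subid is appended at the end.
 */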
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
			u64 subid_cpu)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *uuid_root = fs_info->uuid_root;
	int ret;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int slot;
	unsigned long offset;
	__le64 subid_le;

	ret = btrfs_uuid_tree_lookup(uuid_root, uuid, type, subid_cpu);
	if (ret != -ENOENT)
		return ret;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -EINVAL;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_insert_empty_item(trans, uuid_root, path, &key,
				      sizeof(subid_le));
	if (ret >= 0) {
		/* Add an item for the type for the first time */
		eb = path->nodes[0];
		slot = path->slots[0];
		offset = btrfs_item_ptr_offset(eb, slot);
	} else if (ret == -EEXIST) {
		/*
		 * An item with that type already exists.
		 * Extend the item and store the new subid at the end.
		 */
		btrfs_extend_item(path, sizeof(subid_le));
		eb = path->nodes[0];
		slot = path->slots[0];
		offset = btrfs_item_ptr_offset(eb, slot);
		offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
	} else {
		btrfs_warn(fs_info,
			   "insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
			   ret, (unsigned long long)key.objectid,
			   (unsigned long long)key.offset, type);
		goto out;
	}

	ret = 0;
	subid_le = cpu_to_le64(subid_cpu);
	write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
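
/*
 * Remove one subvolume id from the uuid item of the given (uuid, type)
 * pair. If other subids remain, they are shifted down and the item is
 * shrunk; if the removed subid was the only one, the item is deleted.
 */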
int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
			   u64 subid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *uuid_root = fs_info->uuid_root;
	int ret;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int slot;
	unsigned long offset;
	u32 item_size;
	unsigned long move_dst;
	unsigned long move_src;
	unsigned long move_len;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -EINVAL;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn(fs_info, "error %d while searching for uuid item!",
			   ret);
		goto out;
	}
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	offset = btrfs_item_ptr_offset(eb, slot);
	item_size = btrfs_item_size_nr(eb, slot);
	if (!IS_ALIGNED(item_size, sizeof(u64))) {
		btrfs_warn(fs_info, "uuid item with illegal size %lu!",
			   (unsigned long)item_size);
		ret = -ENOENT;
		goto out;
	}

	/* Find the subid to remove within the item. */
	while (item_size) {
		__le64 read_subid;

		read_extent_buffer(eb, &read_subid, offset, sizeof(read_subid));
		if (le64_to_cpu(read_subid) == subid)
			break;
		offset += sizeof(read_subid);
		item_size -= sizeof(read_subid);
	}

	if (!item_size) {
		ret = -ENOENT;
		goto out;
	}

	item_size = btrfs_item_size_nr(eb, slot);
	if (item_size == sizeof(subid)) {
		ret = btrfs_del_item(trans, uuid_root, path);
		goto out;
	}

	/* Shift the remaining subids down and shrink the item. */
	move_dst = offset;
	move_src = offset + sizeof(subid);
	move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
	memmove_extent_buffer(eb, move_dst, move_src, move_len);
	btrfs_truncate_item(path, item_size - sizeof(subid), 1);

out:
	btrfs_free_path(path);
	return ret;
}
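
/*
 * Helper for the uuid tree iteration below: drop one uuid tree entry in
 * its own transaction.
 */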
static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
			       u64 subid)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* 1 - for the uuid item */
	trans = btrfs_start_transaction(uuid_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_uuid_tree_remove(trans, uuid, type, subid);
	btrfs_end_transaction(trans);

out:
	return ret;
}

/*
 * Check if there's a matching subvolume for the given UUID
 *
 * Return:
 * 0	check succeeded, the entry is not outdated
 * > 0	if the check failed, the caller should remove the entry
 * < 0	if an error occurred
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subvolid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subvolid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_get_fs_root(fs_info, &key, true);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}
	btrfs_put_root(subvol_root);
out:
	return ret;
}
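
/*
 * Walk the whole uuid tree and check every subvolume entry against the
 * corresponding root item. Entries whose subvolume no longer exists or
 * whose UUID no longer matches are removed from the tree.
 */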
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->uuid_root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	int slot;
	u32 item_size;
	unsigned long offset;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

again_search_slot:
	ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto out;
	}

	while (1) {
		if (btrfs_fs_closing(fs_info)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_UUID_KEY_SUBVOL &&
		    key.type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
			goto skip;

		offset = btrfs_item_ptr_offset(leaf, slot);
		item_size = btrfs_item_size_nr(leaf, slot);
		if (!IS_ALIGNED(item_size, sizeof(u64))) {
			btrfs_warn(fs_info,
				   "uuid item with illegal size %lu!",
				   (unsigned long)item_size);
			goto skip;
		}
		while (item_size) {
			u8 uuid[BTRFS_UUID_SIZE];
			__le64 subid_le;
			u64 subid_cpu;

			/* The key encodes the UUID; rebuild it for the check. */
			put_unaligned_le64(key.objectid, uuid);
			put_unaligned_le64(key.offset, uuid + sizeof(u64));
			read_extent_buffer(leaf, &subid_le, offset,
					   sizeof(subid_le));
			subid_cpu = le64_to_cpu(subid_le);
			ret = btrfs_check_uuid_tree_entry(fs_info, uuid,
							  key.type, subid_cpu);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_uuid_iter_rem(root, uuid, key.type,
							  subid_cpu);
				if (ret == 0) {
					/*
					 * this might look inefficient, but the
					 * justification is that it is an
					 * exception that the entry check
					 * returns 1, and that in the regular
					 * case only one entry per UUID exists.
					 */
					goto again_search_slot;
				}
				if (ret < 0 && ret != -ENOENT)
					goto out;
				key.offset++;
				goto again_search_slot;
			}
			item_size -= sizeof(subid_le);
			offset += sizeof(subid_le);
		}

skip:
		ret = btrfs_next_item(root, path);
		if (ret == 0)
			continue;
		else if (ret > 0)
			ret = 0;
		break;
	}

out:
	btrfs_free_path(path);
	return ret;
}