linux-ginger.git: fs/xfs/xfs_bmap_btree.c
1 /*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_alloc.h"
39 #include "xfs_btree.h"
40 #include "xfs_ialloc.h"
41 #include "xfs_itable.h"
42 #include "xfs_bmap.h"
43 #include "xfs_error.h"
44 #include "xfs_quota.h"
46 #if defined(XFS_BMBT_TRACE)
47 ktrace_t *xfs_bmbt_trace_buf;
48 #endif
51 * Prototypes for internal btree functions.
55 STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *);
56 STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
57 STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
58 STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *);
59 STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *);
60 STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *,
61 __uint64_t *, xfs_btree_cur_t **, int *);
62 STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int);
65 #if defined(XFS_BMBT_TRACE)
67 static char ARGS[] = "args";
68 static char ENTRY[] = "entry";
69 static char ERROR[] = "error";
70 #undef EXIT
71 static char EXIT[] = "exit";
74 * Add a trace buffer entry for the arguments given to the routine,
75 * generic form.
77 STATIC void
78 xfs_bmbt_trace_enter(
79 const char *func,
80 xfs_btree_cur_t *cur,
81 char *s,
82 int type,
83 int line,
84 __psunsigned_t a0,
85 __psunsigned_t a1,
86 __psunsigned_t a2,
87 __psunsigned_t a3,
88 __psunsigned_t a4,
89 __psunsigned_t a5,
90 __psunsigned_t a6,
91 __psunsigned_t a7,
92 __psunsigned_t a8,
93 __psunsigned_t a9,
94 __psunsigned_t a10)
96 xfs_inode_t *ip;
97 int whichfork;
99 ip = cur->bc_private.b.ip;
100 whichfork = cur->bc_private.b.whichfork;
101 ktrace_enter(xfs_bmbt_trace_buf,
102 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
103 (void *)func, (void *)s, (void *)ip, (void *)cur,
104 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
105 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
106 (void *)a8, (void *)a9, (void *)a10);
107 ASSERT(ip->i_btrace);
108 ktrace_enter(ip->i_btrace,
109 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
110 (void *)func, (void *)s, (void *)ip, (void *)cur,
111 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
112 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
113 (void *)a8, (void *)a9, (void *)a10);
116 * Add a trace buffer entry for arguments, for a buffer & 1 integer arg.
118 STATIC void
119 xfs_bmbt_trace_argbi(
120 const char *func,
121 xfs_btree_cur_t *cur,
122 xfs_buf_t *b,
123 int i,
124 int line)
126 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBI, line,
127 (__psunsigned_t)b, i, 0, 0,
128 0, 0, 0, 0,
129 0, 0, 0);
133 * Add a trace buffer entry for arguments, for a buffer & 2 integer args.
135 STATIC void
136 xfs_bmbt_trace_argbii(
137 const char *func,
138 xfs_btree_cur_t *cur,
139 xfs_buf_t *b,
140 int i0,
141 int i1,
142 int line)
144 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBII, line,
145 (__psunsigned_t)b, i0, i1, 0,
146 0, 0, 0, 0,
147 0, 0, 0);
151 * Add a trace buffer entry for arguments, for 3 block-length args
152 * and an integer arg.
154 STATIC void
155 xfs_bmbt_trace_argfffi(
156 const char *func,
157 xfs_btree_cur_t *cur,
158 xfs_dfiloff_t o,
159 xfs_dfsbno_t b,
160 xfs_dfilblks_t i,
161 int j,
162 int line)
164 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGFFFI, line,
165 o >> 32, (int)o, b >> 32, (int)b,
166 i >> 32, (int)i, (int)j, 0,
167 0, 0, 0);
171 * Add a trace buffer entry for arguments, for one integer arg.
173 STATIC void
174 xfs_bmbt_trace_argi(
175 const char *func,
176 xfs_btree_cur_t *cur,
177 int i,
178 int line)
180 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGI, line,
181 i, 0, 0, 0,
182 0, 0, 0, 0,
183 0, 0, 0);
187 * Add a trace buffer entry for arguments, for int, fsblock, key.
189 STATIC void
190 xfs_bmbt_trace_argifk(
191 const char *func,
192 xfs_btree_cur_t *cur,
193 int i,
194 xfs_fsblock_t f,
195 xfs_dfiloff_t o,
196 int line)
198 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line,
199 i, (xfs_dfsbno_t)f >> 32, (int)f, o >> 32,
200 (int)o, 0, 0, 0,
201 0, 0, 0);
205 * Add a trace buffer entry for arguments, for int, fsblock, rec.
207 STATIC void
208 xfs_bmbt_trace_argifr(
209 const char *func,
210 xfs_btree_cur_t *cur,
211 int i,
212 xfs_fsblock_t f,
213 xfs_bmbt_rec_t *r,
214 int line)
216 xfs_dfsbno_t b;
217 xfs_dfilblks_t c;
218 xfs_dfsbno_t d;
219 xfs_dfiloff_t o;
220 xfs_bmbt_irec_t s;
222 d = (xfs_dfsbno_t)f;
223 xfs_bmbt_disk_get_all(r, &s);
224 o = (xfs_dfiloff_t)s.br_startoff;
225 b = (xfs_dfsbno_t)s.br_startblock;
226 c = s.br_blockcount;
227 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFR, line,
228 i, d >> 32, (int)d, o >> 32,
229 (int)o, b >> 32, (int)b, c >> 32,
230 (int)c, 0, 0);
234 * Add a trace buffer entry for arguments, for int, key.
236 STATIC void
237 xfs_bmbt_trace_argik(
238 const char *func,
239 xfs_btree_cur_t *cur,
240 int i,
241 xfs_bmbt_key_t *k,
242 int line)
244 xfs_dfiloff_t o;
246 o = be64_to_cpu(k->br_startoff);
247 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line,
248 i, o >> 32, (int)o, 0,
249 0, 0, 0, 0,
250 0, 0, 0);
254 * Add a trace buffer entry for the cursor/operation.
256 STATIC void
257 xfs_bmbt_trace_cursor(
258 const char *func,
259 xfs_btree_cur_t *cur,
260 char *s,
261 int line)
263 xfs_bmbt_rec_host_t r;
265 xfs_bmbt_set_all(&r, &cur->bc_rec.b);
266 xfs_bmbt_trace_enter(func, cur, s, XFS_BMBT_KTRACE_CUR, line,
267 (cur->bc_nlevels << 24) | (cur->bc_private.b.flags << 16) |
268 cur->bc_private.b.allocated,
269 r.l0 >> 32, (int)r.l0,
270 r.l1 >> 32, (int)r.l1,
271 (unsigned long)cur->bc_bufs[0], (unsigned long)cur->bc_bufs[1],
272 (unsigned long)cur->bc_bufs[2], (unsigned long)cur->bc_bufs[3],
273 (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1],
274 (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]);
277 #define XFS_BMBT_TRACE_ARGBI(c,b,i) \
278 xfs_bmbt_trace_argbi(__func__, c, b, i, __LINE__)
279 #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \
280 xfs_bmbt_trace_argbii(__func__, c, b, i, j, __LINE__)
281 #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \
282 xfs_bmbt_trace_argfffi(__func__, c, o, b, i, j, __LINE__)
283 #define XFS_BMBT_TRACE_ARGI(c,i) \
284 xfs_bmbt_trace_argi(__func__, c, i, __LINE__)
285 #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \
286 xfs_bmbt_trace_argifk(__func__, c, i, f, s, __LINE__)
287 #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \
288 xfs_bmbt_trace_argifr(__func__, c, i, f, r, __LINE__)
289 #define XFS_BMBT_TRACE_ARGIK(c,i,k) \
290 xfs_bmbt_trace_argik(__func__, c, i, k, __LINE__)
291 #define XFS_BMBT_TRACE_CURSOR(c,s) \
292 xfs_bmbt_trace_cursor(__func__, c, s, __LINE__)
293 #else
294 #define XFS_BMBT_TRACE_ARGBI(c,b,i)
295 #define XFS_BMBT_TRACE_ARGBII(c,b,i,j)
296 #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j)
297 #define XFS_BMBT_TRACE_ARGI(c,i)
298 #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s)
299 #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r)
300 #define XFS_BMBT_TRACE_ARGIK(c,i,k)
301 #define XFS_BMBT_TRACE_CURSOR(c,s)
302 #endif /* XFS_BMBT_TRACE */
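/*
 * Note on the tracing above: when XFS_BMBT_TRACE is defined, each
 * XFS_BMBT_TRACE_* macro records the calling function, source line,
 * cursor, and up to eleven argument words into both the global
 * xfs_bmbt_trace_buf ktrace buffer and the per-inode i_btrace buffer.
 * When it is not defined, the macros expand to nothing and the
 * tracing compiles away entirely.
 */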
306 * Internal functions.
310 * Delete record pointed to by cur/level.
312 STATIC int /* error */
313 xfs_bmbt_delrec(
314 xfs_btree_cur_t *cur,
315 int level,
316 int *stat) /* success/failure */
318 xfs_bmbt_block_t *block; /* bmap btree block */
319 xfs_fsblock_t bno; /* fs-relative block number */
320 xfs_buf_t *bp; /* buffer for block */
321 int error; /* error return value */
322 int i; /* loop counter */
323 int j; /* temp state */
324 xfs_bmbt_key_t key; /* bmap btree key */
325 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
326 xfs_fsblock_t lbno; /* left sibling block number */
327 xfs_buf_t *lbp; /* left buffer pointer */
328 xfs_bmbt_block_t *left; /* left btree block */
329 xfs_bmbt_key_t *lkp; /* left btree key */
330 xfs_bmbt_ptr_t *lpp; /* left address pointer */
331 int lrecs=0; /* left record count */
332 xfs_bmbt_rec_t *lrp; /* left record pointer */
333 xfs_mount_t *mp; /* file system mount point */
334 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
335 int ptr; /* key/record index */
336 xfs_fsblock_t rbno; /* right sibling block number */
337 xfs_buf_t *rbp; /* right buffer pointer */
338 xfs_bmbt_block_t *right; /* right btree block */
339 xfs_bmbt_key_t *rkp; /* right btree key */
340 xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */
341 xfs_bmbt_ptr_t *rpp; /* right address pointer */
342 xfs_bmbt_block_t *rrblock; /* right-right btree block */
343 xfs_buf_t *rrbp; /* right-right buffer pointer */
344 int rrecs=0; /* right record count */
345 xfs_bmbt_rec_t *rrp; /* right record pointer */
346 xfs_btree_cur_t *tcur; /* temporary btree cursor */
347 int numrecs; /* temporary numrec count */
348 int numlrecs, numrrecs;
350 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
351 XFS_BMBT_TRACE_ARGI(cur, level);
352 ptr = cur->bc_ptrs[level];
353 tcur = NULL;
354 if (ptr == 0) {
355 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
356 *stat = 0;
357 return 0;
359 block = xfs_bmbt_get_block(cur, level, &bp);
360 numrecs = be16_to_cpu(block->bb_numrecs);
361 #ifdef DEBUG
362 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
363 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
364 goto error0;
366 #endif
367 if (ptr > numrecs) {
368 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
369 *stat = 0;
370 return 0;
372 XFS_STATS_INC(xs_bmbt_delrec);
373 if (level > 0) {
374 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
375 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
376 #ifdef DEBUG
377 for (i = ptr; i < numrecs; i++) {
378 if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
379 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
380 goto error0;
383 #endif
384 if (ptr < numrecs) {
385 memmove(&kp[ptr - 1], &kp[ptr],
386 (numrecs - ptr) * sizeof(*kp));
387 memmove(&pp[ptr - 1], &pp[ptr],
388 (numrecs - ptr) * sizeof(*pp));
389 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1);
390 xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1);
392 } else {
393 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
394 if (ptr < numrecs) {
395 memmove(&rp[ptr - 1], &rp[ptr],
396 (numrecs - ptr) * sizeof(*rp));
397 xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
399 if (ptr == 1) {
400 key.br_startoff =
401 cpu_to_be64(xfs_bmbt_disk_get_startoff(rp));
402 kp = &key;
405 numrecs--;
406 block->bb_numrecs = cpu_to_be16(numrecs);
407 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
409 * We're at the root level.
410 * First, shrink the root block in-memory.
411 * Try to get rid of the next level down.
412 * If we can't then there's nothing left to do.
414 if (level == cur->bc_nlevels - 1) {
415 xfs_iroot_realloc(cur->bc_private.b.ip, -1,
416 cur->bc_private.b.whichfork);
417 if ((error = xfs_bmbt_killroot(cur))) {
418 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
419 goto error0;
421 if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) {
422 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
423 goto error0;
425 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
426 *stat = 1;
427 return 0;
429 if (ptr == 1 && (error = xfs_bmbt_updkey(cur, kp, level + 1))) {
430 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
431 goto error0;
433 if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
434 if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) {
435 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
436 goto error0;
438 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
439 *stat = 1;
440 return 0;
442 rbno = be64_to_cpu(block->bb_rightsib);
443 lbno = be64_to_cpu(block->bb_leftsib);
445 * One child of root, need to get a chance to copy its contents
446 * into the root and delete it. Can't go up to next level,
447 * there's nothing to delete there.
449 if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK &&
450 level == cur->bc_nlevels - 2) {
451 if ((error = xfs_bmbt_killroot(cur))) {
452 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
453 goto error0;
455 if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) {
456 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
457 goto error0;
459 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
460 *stat = 1;
461 return 0;
463 ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK);
464 if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
465 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
466 goto error0;
468 bno = NULLFSBLOCK;
469 if (rbno != NULLFSBLOCK) {
470 i = xfs_btree_lastrec(tcur, level);
471 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
472 if ((error = xfs_bmbt_increment(tcur, level, &i))) {
473 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
474 goto error0;
476 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
477 i = xfs_btree_lastrec(tcur, level);
478 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
479 rbp = tcur->bc_bufs[level];
480 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
481 #ifdef DEBUG
482 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
483 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
484 goto error0;
486 #endif
487 bno = be64_to_cpu(right->bb_leftsib);
488 if (be16_to_cpu(right->bb_numrecs) - 1 >=
489 XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
490 if ((error = xfs_bmbt_lshift(tcur, level, &i))) {
491 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
492 goto error0;
494 if (i) {
495 ASSERT(be16_to_cpu(block->bb_numrecs) >=
496 XFS_BMAP_BLOCK_IMINRECS(level, tcur));
497 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
498 tcur = NULL;
499 if (level > 0) {
500 if ((error = xfs_bmbt_decrement(cur,
501 level, &i))) {
502 XFS_BMBT_TRACE_CURSOR(cur,
503 ERROR);
504 goto error0;
507 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
508 *stat = 1;
509 return 0;
512 rrecs = be16_to_cpu(right->bb_numrecs);
513 if (lbno != NULLFSBLOCK) {
514 i = xfs_btree_firstrec(tcur, level);
515 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
516 if ((error = xfs_bmbt_decrement(tcur, level, &i))) {
517 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
518 goto error0;
520 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
523 if (lbno != NULLFSBLOCK) {
524 i = xfs_btree_firstrec(tcur, level);
525 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
527 * decrement to last in block
529 if ((error = xfs_bmbt_decrement(tcur, level, &i))) {
530 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
531 goto error0;
533 i = xfs_btree_firstrec(tcur, level);
534 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
535 lbp = tcur->bc_bufs[level];
536 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
537 #ifdef DEBUG
538 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
539 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
540 goto error0;
542 #endif
543 bno = be64_to_cpu(left->bb_rightsib);
544 if (be16_to_cpu(left->bb_numrecs) - 1 >=
545 XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
546 if ((error = xfs_bmbt_rshift(tcur, level, &i))) {
547 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
548 goto error0;
550 if (i) {
551 ASSERT(be16_to_cpu(block->bb_numrecs) >=
552 XFS_BMAP_BLOCK_IMINRECS(level, tcur));
553 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
554 tcur = NULL;
555 if (level == 0)
556 cur->bc_ptrs[0]++;
557 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
558 *stat = 1;
559 return 0;
562 lrecs = be16_to_cpu(left->bb_numrecs);
564 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
565 tcur = NULL;
566 mp = cur->bc_mp;
567 ASSERT(bno != NULLFSBLOCK);
568 if (lbno != NULLFSBLOCK &&
569 lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
570 rbno = bno;
571 right = block;
572 rbp = bp;
573 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp,
574 XFS_BMAP_BTREE_REF))) {
575 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
576 goto error0;
578 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
579 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
580 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
581 goto error0;
583 } else if (rbno != NULLFSBLOCK &&
584 rrecs + be16_to_cpu(block->bb_numrecs) <=
585 XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
586 lbno = bno;
587 left = block;
588 lbp = bp;
589 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp,
590 XFS_BMAP_BTREE_REF))) {
591 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
592 goto error0;
594 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
595 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
596 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
597 goto error0;
599 lrecs = be16_to_cpu(left->bb_numrecs);
600 } else {
601 if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) {
602 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
603 goto error0;
605 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
606 *stat = 1;
607 return 0;
609 numlrecs = be16_to_cpu(left->bb_numrecs);
610 numrrecs = be16_to_cpu(right->bb_numrecs);
611 if (level > 0) {
612 lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur);
613 lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur);
614 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
615 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
616 #ifdef DEBUG
617 for (i = 0; i < numrrecs; i++) {
618 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
619 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
620 goto error0;
623 #endif
624 memcpy(lkp, rkp, numrrecs * sizeof(*lkp));
625 memcpy(lpp, rpp, numrrecs * sizeof(*lpp));
626 xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
627 xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
628 } else {
629 lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur);
630 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
631 memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
632 xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
634 be16_add_cpu(&left->bb_numrecs, numrrecs);
635 left->bb_rightsib = right->bb_rightsib;
636 xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
637 if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
638 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
639 be64_to_cpu(left->bb_rightsib),
640 0, &rrbp, XFS_BMAP_BTREE_REF))) {
641 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
642 goto error0;
644 rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
645 if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
646 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
647 goto error0;
649 rrblock->bb_leftsib = cpu_to_be64(lbno);
650 xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
652 xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1,
653 cur->bc_private.b.flist, mp);
654 cur->bc_private.b.ip->i_d.di_nblocks--;
655 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
656 XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
657 XFS_TRANS_DQ_BCOUNT, -1L);
658 xfs_trans_binval(cur->bc_tp, rbp);
659 if (bp != lbp) {
660 cur->bc_bufs[level] = lbp;
661 cur->bc_ptrs[level] += lrecs;
662 cur->bc_ra[level] = 0;
663 } else if ((error = xfs_bmbt_increment(cur, level + 1, &i))) {
664 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
665 goto error0;
667 if (level > 0)
668 cur->bc_ptrs[level]--;
669 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
670 *stat = 2;
671 return 0;
673 error0:
674 if (tcur)
675 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
676 return error;
677 }
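/*
 * Note the three-way return convention of xfs_bmbt_delrec() above:
 * *stat is 0 when nothing could be deleted, 1 when the record was
 * removed and the tree is consistent again, and 2 when two sibling
 * blocks were merged, in which case the caller (xfs_bmbt_delete()
 * below) must move up a level and delete the key/pointer for the
 * block that was freed.
 */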
680 * Insert one record/level. Return information to the caller
681 * allowing the next level up to proceed if necessary.
683 STATIC int /* error */
684 xfs_bmbt_insrec(
685 xfs_btree_cur_t *cur,
686 int level,
687 xfs_fsblock_t *bnop,
688 xfs_bmbt_rec_t *recp,
689 xfs_btree_cur_t **curp,
690 int *stat) /* no-go/done/continue */
692 xfs_bmbt_block_t *block; /* bmap btree block */
693 xfs_buf_t *bp; /* buffer for block */
694 int error; /* error return value */
695 int i; /* loop index */
696 xfs_bmbt_key_t key; /* bmap btree key */
697 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
698 int logflags; /* inode logging flags */
699 xfs_fsblock_t nbno; /* new block number */
700 struct xfs_btree_cur *ncur; /* new btree cursor */
701 __uint64_t startoff; /* new btree key value */
702 xfs_bmbt_rec_t nrec; /* new record count */
703 int optr; /* old key/record index */
704 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
705 int ptr; /* key/record index */
706 xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */
707 int numrecs;
709 ASSERT(level < cur->bc_nlevels);
710 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
711 XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
712 ncur = NULL;
713 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(recp));
714 optr = ptr = cur->bc_ptrs[level];
715 if (ptr == 0) {
716 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
717 *stat = 0;
718 return 0;
720 XFS_STATS_INC(xs_bmbt_insrec);
721 block = xfs_bmbt_get_block(cur, level, &bp);
722 numrecs = be16_to_cpu(block->bb_numrecs);
723 #ifdef DEBUG
724 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
725 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
726 return error;
728 if (ptr <= numrecs) {
729 if (level == 0) {
730 rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
731 xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp);
732 } else {
733 kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
734 xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp);
737 #endif
738 nbno = NULLFSBLOCK;
739 if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
740 if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
742 * A root block, that can be made bigger.
744 xfs_iroot_realloc(cur->bc_private.b.ip, 1,
745 cur->bc_private.b.whichfork);
746 block = xfs_bmbt_get_block(cur, level, &bp);
747 } else if (level == cur->bc_nlevels - 1) {
748 if ((error = xfs_bmbt_newroot(cur, &logflags, stat)) ||
749 *stat == 0) {
750 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
751 return error;
753 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
754 logflags);
755 block = xfs_bmbt_get_block(cur, level, &bp);
756 } else {
757 if ((error = xfs_bmbt_rshift(cur, level, &i))) {
758 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
759 return error;
761 if (i) {
762 /* nothing */
763 } else {
764 if ((error = xfs_bmbt_lshift(cur, level, &i))) {
765 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
766 return error;
768 if (i) {
769 optr = ptr = cur->bc_ptrs[level];
770 } else {
771 if ((error = xfs_bmbt_split(cur, level,
772 &nbno, &startoff, &ncur,
773 &i))) {
774 XFS_BMBT_TRACE_CURSOR(cur,
775 ERROR);
776 return error;
778 if (i) {
779 block = xfs_bmbt_get_block(
780 cur, level, &bp);
781 #ifdef DEBUG
782 if ((error =
783 xfs_btree_check_lblock(cur,
784 block, level, bp))) {
785 XFS_BMBT_TRACE_CURSOR(
786 cur, ERROR);
787 return error;
789 #endif
790 ptr = cur->bc_ptrs[level];
791 xfs_bmbt_disk_set_allf(&nrec,
792 startoff, 0, 0,
793 XFS_EXT_NORM);
794 } else {
795 XFS_BMBT_TRACE_CURSOR(cur,
796 EXIT);
797 *stat = 0;
798 return 0;
804 numrecs = be16_to_cpu(block->bb_numrecs);
805 if (level > 0) {
806 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
807 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
808 #ifdef DEBUG
809 for (i = numrecs; i >= ptr; i--) {
810 if ((error = xfs_btree_check_lptr_disk(cur, pp[i - 1],
811 level))) {
812 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
813 return error;
816 #endif
817 memmove(&kp[ptr], &kp[ptr - 1],
818 (numrecs - ptr + 1) * sizeof(*kp));
819 memmove(&pp[ptr], &pp[ptr - 1],
820 (numrecs - ptr + 1) * sizeof(*pp));
821 #ifdef DEBUG
822 if ((error = xfs_btree_check_lptr(cur, *bnop, level))) {
823 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
824 return error;
826 #endif
827 kp[ptr - 1] = key;
828 pp[ptr - 1] = cpu_to_be64(*bnop);
829 numrecs++;
830 block->bb_numrecs = cpu_to_be16(numrecs);
831 xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
832 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
833 } else {
834 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
835 memmove(&rp[ptr], &rp[ptr - 1],
836 (numrecs - ptr + 1) * sizeof(*rp));
837 rp[ptr - 1] = *recp;
838 numrecs++;
839 block->bb_numrecs = cpu_to_be16(numrecs);
840 xfs_bmbt_log_recs(cur, bp, ptr, numrecs);
842 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
843 #ifdef DEBUG
844 if (ptr < numrecs) {
845 if (level == 0)
846 xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1,
847 rp + ptr);
848 else
849 xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1,
850 kp + ptr);
852 #endif
853 if (optr == 1 && (error = xfs_bmbt_updkey(cur, &key, level + 1))) {
854 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
855 return error;
857 *bnop = nbno;
858 if (nbno != NULLFSBLOCK) {
859 *recp = nrec;
860 *curp = ncur;
862 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
863 *stat = 1;
864 return 0;
865 }
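/*
 * xfs_bmbt_insrec() above hands any split back to its caller: if the
 * block had to be split, *bnop is set to the new right block, *recp
 * to the record that must be inserted for it, and *curp to a
 * duplicated cursor when the split happened below the root, so the
 * caller can insert that key/pointer at the next level up.
 */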
867 STATIC int
868 xfs_bmbt_killroot(
869 xfs_btree_cur_t *cur)
871 xfs_bmbt_block_t *block;
872 xfs_bmbt_block_t *cblock;
873 xfs_buf_t *cbp;
874 xfs_bmbt_key_t *ckp;
875 xfs_bmbt_ptr_t *cpp;
876 #ifdef DEBUG
877 int error;
878 #endif
879 int i;
880 xfs_bmbt_key_t *kp;
881 xfs_inode_t *ip;
882 xfs_ifork_t *ifp;
883 int level;
884 xfs_bmbt_ptr_t *pp;
886 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
887 level = cur->bc_nlevels - 1;
888 ASSERT(level >= 1);
889 /*
890 * Don't deal with the case where the root block needs to be a leaf:
891 * we're just going to turn the thing back into extents anyway.
892 */
893 if (level == 1) {
894 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
895 return 0;
897 block = xfs_bmbt_get_block(cur, level, &cbp);
899 * Give up if the root has multiple children.
901 if (be16_to_cpu(block->bb_numrecs) != 1) {
902 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
903 return 0;
904 }
905 /*
906 * Only do this if the next level will fit: the data must be copied
907 * up into the inode, and instead of freeing the root we free the
908 * next level down.
909 */
910 cbp = cur->bc_bufs[level - 1];
911 cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
912 if (be16_to_cpu(cblock->bb_numrecs) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
913 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
914 return 0;
916 ASSERT(be64_to_cpu(cblock->bb_leftsib) == NULLDFSBNO);
917 ASSERT(be64_to_cpu(cblock->bb_rightsib) == NULLDFSBNO);
918 ip = cur->bc_private.b.ip;
919 ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork);
920 ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) ==
921 XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes));
922 i = (int)(be16_to_cpu(cblock->bb_numrecs) - XFS_BMAP_BLOCK_IMAXRECS(level, cur));
923 if (i) {
924 xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
925 block = ifp->if_broot;
927 be16_add_cpu(&block->bb_numrecs, i);
928 ASSERT(block->bb_numrecs == cblock->bb_numrecs);
929 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
930 ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
931 memcpy(kp, ckp, be16_to_cpu(block->bb_numrecs) * sizeof(*kp));
932 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
933 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
934 #ifdef DEBUG
935 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
936 if ((error = xfs_btree_check_lptr_disk(cur, cpp[i], level - 1))) {
937 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
938 return error;
941 #endif
942 memcpy(pp, cpp, be16_to_cpu(block->bb_numrecs) * sizeof(*pp));
943 xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
944 cur->bc_private.b.flist, cur->bc_mp);
945 ip->i_d.di_nblocks--;
946 XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip,
947 XFS_TRANS_DQ_BCOUNT, -1L);
948 xfs_trans_binval(cur->bc_tp, cbp);
949 cur->bc_bufs[level - 1] = NULL;
950 be16_add_cpu(&block->bb_level, -1);
951 xfs_trans_log_inode(cur->bc_tp, ip,
952 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
953 cur->bc_nlevels--;
954 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
955 return 0;
956 }
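/*
 * xfs_bmbt_killroot() above only acts when the root (which lives in
 * the inode fork) has a single child whose contents will fit in the
 * inode root: it grows the inode root as needed, copies the child's
 * keys and pointers up into it, frees the child block, and drops both
 * bb_level and the cursor's bc_nlevels by one.
 */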
959 * Log key values from the btree block.
961 STATIC void
962 xfs_bmbt_log_keys(
963 xfs_btree_cur_t *cur,
964 xfs_buf_t *bp,
965 int kfirst,
966 int klast)
968 xfs_trans_t *tp;
970 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
971 XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast);
972 tp = cur->bc_tp;
973 if (bp) {
974 xfs_bmbt_block_t *block;
975 int first;
976 xfs_bmbt_key_t *kp;
977 int last;
979 block = XFS_BUF_TO_BMBT_BLOCK(bp);
980 kp = XFS_BMAP_KEY_DADDR(block, 1, cur);
981 first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
982 last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
983 xfs_trans_log_buf(tp, bp, first, last);
984 } else {
985 xfs_inode_t *ip;
987 ip = cur->bc_private.b.ip;
988 xfs_trans_log_inode(tp, ip,
989 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
991 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
995 * Log pointer values from the btree block.
997 STATIC void
998 xfs_bmbt_log_ptrs(
999 xfs_btree_cur_t *cur,
1000 xfs_buf_t *bp,
1001 int pfirst,
1002 int plast)
1004 xfs_trans_t *tp;
1006 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1007 XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast);
1008 tp = cur->bc_tp;
1009 if (bp) {
1010 xfs_bmbt_block_t *block;
1011 int first;
1012 int last;
1013 xfs_bmbt_ptr_t *pp;
1015 block = XFS_BUF_TO_BMBT_BLOCK(bp);
1016 pp = XFS_BMAP_PTR_DADDR(block, 1, cur);
1017 first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
1018 last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
1019 xfs_trans_log_buf(tp, bp, first, last);
1020 } else {
1021 xfs_inode_t *ip;
1023 ip = cur->bc_private.b.ip;
1024 xfs_trans_log_inode(tp, ip,
1025 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
1027 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1028 }
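/*
 * In both xfs_bmbt_log_keys() and xfs_bmbt_log_ptrs() above, a NULL
 * buffer pointer means the block being logged is the root, which
 * lives in the inode fork rather than in a buffer, so the inode is
 * logged with XFS_ILOG_FBROOT instead of logging a byte range of a
 * buffer.
 */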
1031 * Lookup the record. The cursor is made to point to it, based on dir.
1033 STATIC int /* error */
1034 xfs_bmbt_lookup(
1035 xfs_btree_cur_t *cur,
1036 xfs_lookup_t dir,
1037 int *stat) /* success/failure */
1039 xfs_bmbt_block_t *block=NULL;
1040 xfs_buf_t *bp;
1041 xfs_daddr_t d;
1042 xfs_sfiloff_t diff;
1043 int error; /* error return value */
1044 xfs_fsblock_t fsbno=0;
1045 int high;
1046 int i;
1047 int keyno=0;
1048 xfs_bmbt_key_t *kkbase=NULL;
1049 xfs_bmbt_key_t *kkp;
1050 xfs_bmbt_rec_t *krbase=NULL;
1051 xfs_bmbt_rec_t *krp;
1052 int level;
1053 int low;
1054 xfs_mount_t *mp;
1055 xfs_bmbt_ptr_t *pp;
1056 xfs_bmbt_irec_t *rp;
1057 xfs_fileoff_t startoff;
1058 xfs_trans_t *tp;
1060 XFS_STATS_INC(xs_bmbt_lookup);
1061 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1062 XFS_BMBT_TRACE_ARGI(cur, (int)dir);
1063 tp = cur->bc_tp;
1064 mp = cur->bc_mp;
1065 rp = &cur->bc_rec.b;
1066 for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
1067 if (level < cur->bc_nlevels - 1) {
1068 d = XFS_FSB_TO_DADDR(mp, fsbno);
1069 bp = cur->bc_bufs[level];
1070 if (bp && XFS_BUF_ADDR(bp) != d)
1071 bp = NULL;
1072 if (!bp) {
1073 if ((error = xfs_btree_read_bufl(mp, tp, fsbno,
1074 0, &bp, XFS_BMAP_BTREE_REF))) {
1075 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1076 return error;
1078 xfs_btree_setbuf(cur, level, bp);
1079 block = XFS_BUF_TO_BMBT_BLOCK(bp);
1080 if ((error = xfs_btree_check_lblock(cur, block,
1081 level, bp))) {
1082 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1083 return error;
1085 } else
1086 block = XFS_BUF_TO_BMBT_BLOCK(bp);
1087 } else
1088 block = xfs_bmbt_get_block(cur, level, &bp);
1089 if (diff == 0)
1090 keyno = 1;
1091 else {
1092 if (level > 0)
1093 kkbase = XFS_BMAP_KEY_IADDR(block, 1, cur);
1094 else
1095 krbase = XFS_BMAP_REC_IADDR(block, 1, cur);
1096 low = 1;
1097 if (!(high = be16_to_cpu(block->bb_numrecs))) {
1098 ASSERT(level == 0);
1099 cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
1100 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1101 *stat = 0;
1102 return 0;
1104 while (low <= high) {
1105 XFS_STATS_INC(xs_bmbt_compare);
1106 keyno = (low + high) >> 1;
1107 if (level > 0) {
1108 kkp = kkbase + keyno - 1;
1109 startoff = be64_to_cpu(kkp->br_startoff);
1110 } else {
1111 krp = krbase + keyno - 1;
1112 startoff = xfs_bmbt_disk_get_startoff(krp);
1114 diff = (xfs_sfiloff_t)
1115 (startoff - rp->br_startoff);
1116 if (diff < 0)
1117 low = keyno + 1;
1118 else if (diff > 0)
1119 high = keyno - 1;
1120 else
1121 break;
1124 if (level > 0) {
1125 if (diff > 0 && --keyno < 1)
1126 keyno = 1;
1127 pp = XFS_BMAP_PTR_IADDR(block, keyno, cur);
1128 fsbno = be64_to_cpu(*pp);
1129 #ifdef DEBUG
1130 if ((error = xfs_btree_check_lptr(cur, fsbno, level))) {
1131 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1132 return error;
1134 #endif
1135 cur->bc_ptrs[level] = keyno;
1138 if (dir != XFS_LOOKUP_LE && diff < 0) {
1139 keyno++;
1140 /*
1141 * If this is a GE search and we went off the end of the block, but
1142 * it's not the last block, we're in the wrong block.
1143 */
1144 if (dir == XFS_LOOKUP_GE && keyno > be16_to_cpu(block->bb_numrecs) &&
1145 be64_to_cpu(block->bb_rightsib) != NULLDFSBNO) {
1146 cur->bc_ptrs[0] = keyno;
1147 if ((error = xfs_bmbt_increment(cur, 0, &i))) {
1148 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1149 return error;
1151 XFS_WANT_CORRUPTED_RETURN(i == 1);
1152 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1153 *stat = 1;
1154 return 0;
1157 else if (dir == XFS_LOOKUP_LE && diff > 0)
1158 keyno--;
1159 cur->bc_ptrs[0] = keyno;
1160 if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) {
1161 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1162 *stat = 0;
1163 } else {
1164 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1165 *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
1167 return 0;
1168 }
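/*
 * The lookup above walks from the root to the leaf, doing a binary
 * search of each block's keys (or records, at level 0) against
 * cur->bc_rec.b.br_startoff.  The final *stat reflects the lookup
 * mode: for XFS_LOOKUP_EQ it is 1 only on an exact match, while for
 * XFS_LOOKUP_LE/GE it is 1 whenever the cursor ends up on a valid
 * record index in the leaf.
 */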
1171 * Move 1 record left from cur/level if possible.
1172 * Update cur to reflect the new path.
1174 STATIC int /* error */
1175 xfs_bmbt_lshift(
1176 xfs_btree_cur_t *cur,
1177 int level,
1178 int *stat) /* success/failure */
1180 int error; /* error return value */
1181 #ifdef DEBUG
1182 int i; /* loop counter */
1183 #endif
1184 xfs_bmbt_key_t key; /* bmap btree key */
1185 xfs_buf_t *lbp; /* left buffer pointer */
1186 xfs_bmbt_block_t *left; /* left btree block */
1187 xfs_bmbt_key_t *lkp=NULL; /* left btree key */
1188 xfs_bmbt_ptr_t *lpp; /* left address pointer */
1189 int lrecs; /* left record count */
1190 xfs_bmbt_rec_t *lrp=NULL; /* left record pointer */
1191 xfs_mount_t *mp; /* file system mount point */
1192 xfs_buf_t *rbp; /* right buffer pointer */
1193 xfs_bmbt_block_t *right; /* right btree block */
1194 xfs_bmbt_key_t *rkp=NULL; /* right btree key */
1195 xfs_bmbt_ptr_t *rpp=NULL; /* right address pointer */
1196 xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */
1197 int rrecs; /* right record count */
1199 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1200 XFS_BMBT_TRACE_ARGI(cur, level);
1201 if (level == cur->bc_nlevels - 1) {
1202 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1203 *stat = 0;
1204 return 0;
1206 rbp = cur->bc_bufs[level];
1207 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
1208 #ifdef DEBUG
1209 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
1210 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1211 return error;
1213 #endif
1214 if (be64_to_cpu(right->bb_leftsib) == NULLDFSBNO) {
1215 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1216 *stat = 0;
1217 return 0;
1219 if (cur->bc_ptrs[level] <= 1) {
1220 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1221 *stat = 0;
1222 return 0;
1224 mp = cur->bc_mp;
1225 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(right->bb_leftsib), 0,
1226 &lbp, XFS_BMAP_BTREE_REF))) {
1227 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1228 return error;
1230 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
1231 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
1232 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1233 return error;
1235 if (be16_to_cpu(left->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
1236 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1237 *stat = 0;
1238 return 0;
1240 lrecs = be16_to_cpu(left->bb_numrecs) + 1;
1241 if (level > 0) {
1242 lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur);
1243 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
1244 *lkp = *rkp;
1245 xfs_bmbt_log_keys(cur, lbp, lrecs, lrecs);
1246 lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur);
1247 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1248 #ifdef DEBUG
1249 if ((error = xfs_btree_check_lptr_disk(cur, *rpp, level))) {
1250 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1251 return error;
1253 #endif
1254 *lpp = *rpp;
1255 xfs_bmbt_log_ptrs(cur, lbp, lrecs, lrecs);
1256 } else {
1257 lrp = XFS_BMAP_REC_IADDR(left, lrecs, cur);
1258 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
1259 *lrp = *rrp;
1260 xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs);
1262 left->bb_numrecs = cpu_to_be16(lrecs);
1263 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
1264 #ifdef DEBUG
1265 if (level > 0)
1266 xfs_btree_check_key(XFS_BTNUM_BMAP, lkp - 1, lkp);
1267 else
1268 xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp);
1269 #endif
1270 rrecs = be16_to_cpu(right->bb_numrecs) - 1;
1271 right->bb_numrecs = cpu_to_be16(rrecs);
1272 xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
1273 if (level > 0) {
1274 #ifdef DEBUG
1275 for (i = 0; i < rrecs; i++) {
1276 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i + 1],
1277 level))) {
1278 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1279 return error;
1282 #endif
1283 memmove(rkp, rkp + 1, rrecs * sizeof(*rkp));
1284 memmove(rpp, rpp + 1, rrecs * sizeof(*rpp));
1285 xfs_bmbt_log_keys(cur, rbp, 1, rrecs);
1286 xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs);
1287 } else {
1288 memmove(rrp, rrp + 1, rrecs * sizeof(*rrp));
1289 xfs_bmbt_log_recs(cur, rbp, 1, rrecs);
1290 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
1291 rkp = &key;
1293 if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) {
1294 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1295 return error;
1297 cur->bc_ptrs[level]--;
1298 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1299 *stat = 1;
1300 return 0;
1301 }
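/*
 * xfs_bmbt_lshift() above and xfs_bmbt_rshift() below rebalance by
 * moving a single record into an existing sibling block; in both
 * cases the right-hand block's first entry changes, so the parent's
 * key for that block is fixed up with xfs_bmbt_updkey().  If a shift
 * is not possible they simply return *stat = 0, and the callers fall
 * back to xfs_bmbt_split() on insert or to merging blocks in
 * xfs_bmbt_delrec() on delete.
 */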
1304 * Move 1 record right from cur/level if possible.
1305 * Update cur to reflect the new path.
1307 STATIC int /* error */
1308 xfs_bmbt_rshift(
1309 xfs_btree_cur_t *cur,
1310 int level,
1311 int *stat) /* success/failure */
1313 int error; /* error return value */
1314 int i; /* loop counter */
1315 xfs_bmbt_key_t key; /* bmap btree key */
1316 xfs_buf_t *lbp; /* left buffer pointer */
1317 xfs_bmbt_block_t *left; /* left btree block */
1318 xfs_bmbt_key_t *lkp; /* left btree key */
1319 xfs_bmbt_ptr_t *lpp; /* left address pointer */
1320 xfs_bmbt_rec_t *lrp; /* left record pointer */
1321 xfs_mount_t *mp; /* file system mount point */
1322 xfs_buf_t *rbp; /* right buffer pointer */
1323 xfs_bmbt_block_t *right; /* right btree block */
1324 xfs_bmbt_key_t *rkp; /* right btree key */
1325 xfs_bmbt_ptr_t *rpp; /* right address pointer */
1326 xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */
1327 struct xfs_btree_cur *tcur; /* temporary btree cursor */
1329 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1330 XFS_BMBT_TRACE_ARGI(cur, level);
1331 if (level == cur->bc_nlevels - 1) {
1332 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1333 *stat = 0;
1334 return 0;
1336 lbp = cur->bc_bufs[level];
1337 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
1338 #ifdef DEBUG
1339 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
1340 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1341 return error;
1343 #endif
1344 if (be64_to_cpu(left->bb_rightsib) == NULLDFSBNO) {
1345 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1346 *stat = 0;
1347 return 0;
1349 if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
1350 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1351 *stat = 0;
1352 return 0;
1354 mp = cur->bc_mp;
1355 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(left->bb_rightsib), 0,
1356 &rbp, XFS_BMAP_BTREE_REF))) {
1357 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1358 return error;
1360 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
1361 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
1362 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1363 return error;
1365 if (be16_to_cpu(right->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
1366 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1367 *stat = 0;
1368 return 0;
1370 if (level > 0) {
1371 lkp = XFS_BMAP_KEY_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
1372 lpp = XFS_BMAP_PTR_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
1373 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
1374 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1375 #ifdef DEBUG
1376 for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
1377 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
1378 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1379 return error;
1382 #endif
1383 memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1384 memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1385 #ifdef DEBUG
1386 if ((error = xfs_btree_check_lptr_disk(cur, *lpp, level))) {
1387 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1388 return error;
1390 #endif
1391 *rkp = *lkp;
1392 *rpp = *lpp;
1393 xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1394 xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1395 } else {
1396 lrp = XFS_BMAP_REC_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
1397 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
1398 memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1399 *rrp = *lrp;
1400 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1401 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
1402 rkp = &key;
1404 be16_add_cpu(&left->bb_numrecs, -1);
1405 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
1406 be16_add_cpu(&right->bb_numrecs, 1);
1407 #ifdef DEBUG
1408 if (level > 0)
1409 xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1);
1410 else
1411 xfs_btree_check_rec(XFS_BTNUM_BMAP, rrp, rrp + 1);
1412 #endif
1413 xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
1414 if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
1415 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1416 return error;
1418 i = xfs_btree_lastrec(tcur, level);
1419 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1420 if ((error = xfs_bmbt_increment(tcur, level, &i))) {
1421 XFS_BMBT_TRACE_CURSOR(tcur, ERROR);
1422 goto error1;
1424 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1425 if ((error = xfs_bmbt_updkey(tcur, rkp, level + 1))) {
1426 XFS_BMBT_TRACE_CURSOR(tcur, ERROR);
1427 goto error1;
1429 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1430 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1431 *stat = 1;
1432 return 0;
1433 error0:
1434 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1435 error1:
1436 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1437 return error;
1441 * Determine the extent state.
1443 /* ARGSUSED */
1444 STATIC xfs_exntst_t
1445 xfs_extent_state(
1446 xfs_filblks_t blks,
1447 int extent_flag)
1449 if (extent_flag) {
1450 ASSERT(blks != 0); /* saved for DMIG */
1451 return XFS_EXT_UNWRITTEN;
1453 return XFS_EXT_NORM;
1458 * Split cur/level block in half.
1459 * Return new block number and its first record (to be inserted into parent).
1461 STATIC int /* error */
1462 xfs_bmbt_split(
1463 xfs_btree_cur_t *cur,
1464 int level,
1465 xfs_fsblock_t *bnop,
1466 __uint64_t *startoff,
1467 xfs_btree_cur_t **curp,
1468 int *stat) /* success/failure */
1470 xfs_alloc_arg_t args; /* block allocation args */
1471 int error; /* error return value */
1472 int i; /* loop counter */
1473 xfs_fsblock_t lbno; /* left sibling block number */
1474 xfs_buf_t *lbp; /* left buffer pointer */
1475 xfs_bmbt_block_t *left; /* left btree block */
1476 xfs_bmbt_key_t *lkp; /* left btree key */
1477 xfs_bmbt_ptr_t *lpp; /* left address pointer */
1478 xfs_bmbt_rec_t *lrp; /* left record pointer */
1479 xfs_buf_t *rbp; /* right buffer pointer */
1480 xfs_bmbt_block_t *right; /* right btree block */
1481 xfs_bmbt_key_t *rkp; /* right btree key */
1482 xfs_bmbt_ptr_t *rpp; /* right address pointer */
1483 xfs_bmbt_block_t *rrblock; /* right-right btree block */
1484 xfs_buf_t *rrbp; /* right-right buffer pointer */
1485 xfs_bmbt_rec_t *rrp; /* right record pointer */
1487 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1488 XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff);
1489 args.tp = cur->bc_tp;
1490 args.mp = cur->bc_mp;
1491 lbp = cur->bc_bufs[level];
1492 lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp));
1493 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
1494 args.fsbno = cur->bc_private.b.firstblock;
1495 args.firstblock = args.fsbno;
1496 if (args.fsbno == NULLFSBLOCK) {
1497 args.fsbno = lbno;
1498 args.type = XFS_ALLOCTYPE_START_BNO;
1499 } else
1500 args.type = XFS_ALLOCTYPE_NEAR_BNO;
1501 args.mod = args.minleft = args.alignment = args.total = args.isfl =
1502 args.userdata = args.minalignslop = 0;
1503 args.minlen = args.maxlen = args.prod = 1;
1504 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
1505 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
1506 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1507 return XFS_ERROR(ENOSPC);
1509 if ((error = xfs_alloc_vextent(&args))) {
1510 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1511 return error;
1513 if (args.fsbno == NULLFSBLOCK) {
1514 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1515 *stat = 0;
1516 return 0;
1518 ASSERT(args.len == 1);
1519 cur->bc_private.b.firstblock = args.fsbno;
1520 cur->bc_private.b.allocated++;
1521 cur->bc_private.b.ip->i_d.di_nblocks++;
1522 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
1523 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
1524 XFS_TRANS_DQ_BCOUNT, 1L);
1525 rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0);
1526 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
1527 #ifdef DEBUG
1528 if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) {
1529 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1530 return error;
1532 #endif
1533 right->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
1534 right->bb_level = left->bb_level;
1535 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
1536 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1537 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
1538 be16_add_cpu(&right->bb_numrecs, 1);
1539 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
1540 if (level > 0) {
1541 lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
1542 lpp = XFS_BMAP_PTR_IADDR(left, i, cur);
1543 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
1544 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1545 #ifdef DEBUG
1546 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
1547 if ((error = xfs_btree_check_lptr_disk(cur, lpp[i], level))) {
1548 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1549 return error;
1552 #endif
1553 memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1554 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1555 xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1556 xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1557 *startoff = be64_to_cpu(rkp->br_startoff);
1558 } else {
1559 lrp = XFS_BMAP_REC_IADDR(left, i, cur);
1560 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
1561 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1562 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1563 *startoff = xfs_bmbt_disk_get_startoff(rrp);
1565 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1566 right->bb_rightsib = left->bb_rightsib;
1567 left->bb_rightsib = cpu_to_be64(args.fsbno);
1568 right->bb_leftsib = cpu_to_be64(lbno);
1569 xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS);
1570 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
1571 if (be64_to_cpu(right->bb_rightsib) != NULLDFSBNO) {
1572 if ((error = xfs_btree_read_bufl(args.mp, args.tp,
1573 be64_to_cpu(right->bb_rightsib), 0, &rrbp,
1574 XFS_BMAP_BTREE_REF))) {
1575 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1576 return error;
1578 rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
1579 if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
1580 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1581 return error;
1583 rrblock->bb_leftsib = cpu_to_be64(args.fsbno);
1584 xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
1586 if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
1587 xfs_btree_setbuf(cur, level, rbp);
1588 cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
1590 if (level + 1 < cur->bc_nlevels) {
1591 if ((error = xfs_btree_dup_cursor(cur, curp))) {
1592 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1593 return error;
1595 (*curp)->bc_ptrs[level + 1]++;
1597 *bnop = args.fsbno;
1598 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1599 *stat = 1;
1600 return 0;
1601 }
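/*
 * The split above allocates one new block to the right of the current
 * one and moves the higher half of the keys/records into it.  The new
 * block number and its first key come back through *bnop and
 * *startoff so the caller can insert them at the next level up, and
 * if the cursor pointed into the half that moved it is switched over
 * to the new right block.
 */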
1605 * Update keys for the record.
1607 STATIC int
1608 xfs_bmbt_updkey(
1609 xfs_btree_cur_t *cur,
1610 xfs_bmbt_key_t *keyp, /* on-disk format */
1611 int level)
1613 xfs_bmbt_block_t *block;
1614 xfs_buf_t *bp;
1615 #ifdef DEBUG
1616 int error;
1617 #endif
1618 xfs_bmbt_key_t *kp;
1619 int ptr;
1621 ASSERT(level >= 1);
1622 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1623 XFS_BMBT_TRACE_ARGIK(cur, level, keyp);
1624 for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
1625 block = xfs_bmbt_get_block(cur, level, &bp);
1626 #ifdef DEBUG
1627 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
1628 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1629 return error;
1631 #endif
1632 ptr = cur->bc_ptrs[level];
1633 kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
1634 *kp = *keyp;
1635 xfs_bmbt_log_keys(cur, bp, ptr, ptr);
1637 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1638 return 0;
1642 * Convert on-disk form of btree root to in-memory form.
1644 void
1645 xfs_bmdr_to_bmbt(
1646 xfs_bmdr_block_t *dblock,
1647 int dblocklen,
1648 xfs_bmbt_block_t *rblock,
1649 int rblocklen)
1651 int dmxr;
1652 xfs_bmbt_key_t *fkp;
1653 __be64 *fpp;
1654 xfs_bmbt_key_t *tkp;
1655 __be64 *tpp;
1657 rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
1658 rblock->bb_level = dblock->bb_level;
1659 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
1660 rblock->bb_numrecs = dblock->bb_numrecs;
1661 rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
1662 rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
1663 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
1664 fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
1665 tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
1666 fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
1667 tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
1668 dmxr = be16_to_cpu(dblock->bb_numrecs);
1669 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
1670 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
1671 }
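/*
 * Note that dmxr above is used twice: first as the maximum record
 * count the on-disk (dblock) root can hold, which fixes where its
 * pointer array starts, and then as the actual number of records to
 * copy into the in-core (rblock) root.
 */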
1674 * Decrement cursor by one record at the level.
1675 * For nonzero levels the leaf-ward information is untouched.
1677 int /* error */
1678 xfs_bmbt_decrement(
1679 xfs_btree_cur_t *cur,
1680 int level,
1681 int *stat) /* success/failure */
1683 xfs_bmbt_block_t *block;
1684 xfs_buf_t *bp;
1685 int error; /* error return value */
1686 xfs_fsblock_t fsbno;
1687 int lev;
1688 xfs_mount_t *mp;
1689 xfs_trans_t *tp;
1691 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1692 XFS_BMBT_TRACE_ARGI(cur, level);
1693 ASSERT(level < cur->bc_nlevels);
1694 if (level < cur->bc_nlevels - 1)
1695 xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
1696 if (--cur->bc_ptrs[level] > 0) {
1697 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1698 *stat = 1;
1699 return 0;
1701 block = xfs_bmbt_get_block(cur, level, &bp);
1702 #ifdef DEBUG
1703 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
1704 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1705 return error;
1707 #endif
1708 if (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO) {
1709 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1710 *stat = 0;
1711 return 0;
1713 for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
1714 if (--cur->bc_ptrs[lev] > 0)
1715 break;
1716 if (lev < cur->bc_nlevels - 1)
1717 xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
1719 if (lev == cur->bc_nlevels) {
1720 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1721 *stat = 0;
1722 return 0;
1724 tp = cur->bc_tp;
1725 mp = cur->bc_mp;
1726 for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) {
1727 fsbno = be64_to_cpu(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur));
1728 if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp,
1729 XFS_BMAP_BTREE_REF))) {
1730 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1731 return error;
1733 lev--;
1734 xfs_btree_setbuf(cur, lev, bp);
1735 block = XFS_BUF_TO_BMBT_BLOCK(bp);
1736 if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
1737 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1738 return error;
1740 cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs);
1742 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1743 *stat = 1;
1744 return 0;
1748 * Delete the record pointed to by cur.
1750 int /* error */
1751 xfs_bmbt_delete(
1752 xfs_btree_cur_t *cur,
1753 int *stat) /* success/failure */
1755 int error; /* error return value */
1756 int i;
1757 int level;
1759 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1760 for (level = 0, i = 2; i == 2; level++) {
1761 if ((error = xfs_bmbt_delrec(cur, level, &i))) {
1762 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1763 return error;
1766 if (i == 0) {
1767 for (level = 1; level < cur->bc_nlevels; level++) {
1768 if (cur->bc_ptrs[level] == 0) {
1769 if ((error = xfs_bmbt_decrement(cur, level,
1770 &i))) {
1771 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1772 return error;
1774 break;
1778 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1779 *stat = i;
1780 return 0;
1784 * Convert a compressed bmap extent record to an uncompressed form.
1785 * This code must be in sync with the routines xfs_bmbt_get_startoff,
1786 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
1787 */
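/*
 * Layout of the packed extent record decoded below (two 64-bit words,
 * l0 and l1): the top bit of l0 is the unwritten-extent flag, bits
 * 62..9 of l0 (54 bits) are br_startoff, the low 9 bits of l0 plus
 * the top 43 bits of l1 form the 52-bit br_startblock, and the low
 * 21 bits of l1 are br_blockcount.
 */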
1789 STATIC_INLINE void
1790 __xfs_bmbt_get_all(
1791 __uint64_t l0,
1792 __uint64_t l1,
1793 xfs_bmbt_irec_t *s)
1795 int ext_flag;
1796 xfs_exntst_t st;
1798 ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
1799 s->br_startoff = ((xfs_fileoff_t)l0 &
1800 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
1801 #if XFS_BIG_BLKNOS
1802 s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
1803 (((xfs_fsblock_t)l1) >> 21);
1804 #else
1805 #ifdef DEBUG
1807 xfs_dfsbno_t b;
1809 b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
1810 (((xfs_dfsbno_t)l1) >> 21);
1811 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
1812 s->br_startblock = (xfs_fsblock_t)b;
1814 #else /* !DEBUG */
1815 s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
1816 #endif /* DEBUG */
1817 #endif /* XFS_BIG_BLKNOS */
1818 s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
1819 /* This is xfs_extent_state() in-line */
1820 if (ext_flag) {
1821 ASSERT(s->br_blockcount != 0); /* saved for DMIG */
1822 st = XFS_EXT_UNWRITTEN;
1823 } else
1824 st = XFS_EXT_NORM;
1825 s->br_state = st;
1828 void
1829 xfs_bmbt_get_all(
1830 xfs_bmbt_rec_host_t *r,
1831 xfs_bmbt_irec_t *s)
1833 __xfs_bmbt_get_all(r->l0, r->l1, s);
1837 * Get the block pointer for the given level of the cursor.
1838 * Fill in the buffer pointer, if applicable.
1840 xfs_bmbt_block_t *
1841 xfs_bmbt_get_block(
1842 xfs_btree_cur_t *cur,
1843 int level,
1844 xfs_buf_t **bpp)
1846 xfs_ifork_t *ifp;
1847 xfs_bmbt_block_t *rval;
1849 if (level < cur->bc_nlevels - 1) {
1850 *bpp = cur->bc_bufs[level];
1851 rval = XFS_BUF_TO_BMBT_BLOCK(*bpp);
1852 } else {
1853 *bpp = NULL;
1854 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
1855 cur->bc_private.b.whichfork);
1856 rval = ifp->if_broot;
1858 return rval;
1862 * Extract the blockcount field from an in memory bmap extent record.
1864 xfs_filblks_t
1865 xfs_bmbt_get_blockcount(
1866 xfs_bmbt_rec_host_t *r)
1868 return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
1872 * Extract the startblock field from an in memory bmap extent record.
1874 xfs_fsblock_t
1875 xfs_bmbt_get_startblock(
1876 xfs_bmbt_rec_host_t *r)
1878 #if XFS_BIG_BLKNOS
1879 return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
1880 (((xfs_fsblock_t)r->l1) >> 21);
1881 #else
1882 #ifdef DEBUG
1883 xfs_dfsbno_t b;
1885 b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
1886 (((xfs_dfsbno_t)r->l1) >> 21);
1887 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
1888 return (xfs_fsblock_t)b;
1889 #else /* !DEBUG */
1890 return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
1891 #endif /* DEBUG */
1892 #endif /* XFS_BIG_BLKNOS */
1896 * Extract the startoff field from an in memory bmap extent record.
1898 xfs_fileoff_t
1899 xfs_bmbt_get_startoff(
1900 xfs_bmbt_rec_host_t *r)
1902 return ((xfs_fileoff_t)r->l0 &
1903 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
1906 xfs_exntst_t
1907 xfs_bmbt_get_state(
1908 xfs_bmbt_rec_host_t *r)
1910 int ext_flag;
1912 ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
1913 return xfs_extent_state(xfs_bmbt_get_blockcount(r),
1914 ext_flag);
1917 /* Endian flipping versions of the bmbt extraction functions */
1918 void
1919 xfs_bmbt_disk_get_all(
1920 xfs_bmbt_rec_t *r,
1921 xfs_bmbt_irec_t *s)
1923 __xfs_bmbt_get_all(be64_to_cpu(r->l0), be64_to_cpu(r->l1), s);
1927 * Extract the blockcount field from an on disk bmap extent record.
1929 xfs_filblks_t
1930 xfs_bmbt_disk_get_blockcount(
1931 xfs_bmbt_rec_t *r)
1933 return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
1937 * Extract the startoff field from a disk format bmap extent record.
1939 xfs_fileoff_t
1940 xfs_bmbt_disk_get_startoff(
1941 xfs_bmbt_rec_t *r)
1943 return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
1944 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
1948 * Increment cursor by one record at the level.
1949 * For nonzero levels the leaf-ward information is untouched.
1951 int /* error */
1952 xfs_bmbt_increment(
1953 xfs_btree_cur_t *cur,
1954 int level,
1955 int *stat) /* success/failure */
1957 xfs_bmbt_block_t *block;
1958 xfs_buf_t *bp;
1959 int error; /* error return value */
1960 xfs_fsblock_t fsbno;
1961 int lev;
1962 xfs_mount_t *mp;
1963 xfs_trans_t *tp;
1965 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1966 XFS_BMBT_TRACE_ARGI(cur, level);
1967 ASSERT(level < cur->bc_nlevels);
1968 if (level < cur->bc_nlevels - 1)
1969 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
1970 block = xfs_bmbt_get_block(cur, level, &bp);
1971 #ifdef DEBUG
1972 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
1973 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1974 return error;
1975 }
1976 #endif
1977 if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) {
1978 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1979 *stat = 1;
1980 return 0;
1981 }
1982 if (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO) {
1983 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1984 *stat = 0;
1985 return 0;
1986 }
1987 for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
1988 block = xfs_bmbt_get_block(cur, lev, &bp);
1989 #ifdef DEBUG
1990 if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
1991 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1992 return error;
1993 }
1994 #endif
1995 if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
1996 break;
1997 if (lev < cur->bc_nlevels - 1)
1998 xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
1999 }
2000 if (lev == cur->bc_nlevels) {
2001 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2002 *stat = 0;
2003 return 0;
2004 }
2005 tp = cur->bc_tp;
2006 mp = cur->bc_mp;
2007 for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) {
2008 fsbno = be64_to_cpu(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur));
2009 if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp,
2010 XFS_BMAP_BTREE_REF))) {
2011 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2012 return error;
2013 }
2014 lev--;
2015 xfs_btree_setbuf(cur, lev, bp);
2016 block = XFS_BUF_TO_BMBT_BLOCK(bp);
2017 if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
2018 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2019 return error;
2020 }
2021 cur->bc_ptrs[lev] = 1;
2022 }
2023 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2024 *stat = 1;
2025 return 0;
2026 }
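/*
 * Illustrative stand-alone toy model (not from the XFS sources): it mimics
 * the control flow of xfs_bmbt_increment() on a fixed two-level tree.  Bump
 * the pointer at the requested level; if that runs off the end of the block,
 * walk towards the root until a level still has entries left, then walk back
 * down pointing at the first entry of each child.  All names and sizes are
 * invented for the illustration.
 */
#include <stdio.h>

#define NLEVELS		2
#define ROOT_RECS	3	/* root has 3 children */
#define LEAF_RECS	4	/* each leaf holds 4 records */

struct toy_cur {
	int	ptrs[NLEVELS];	/* 1-based; ptrs[0] = leaf, ptrs[1] = root */
};

/* returns 1 if the cursor now points at a valid record, 0 at end of tree */
static int toy_increment(struct toy_cur *cur)
{
	int	lev;

	if (++cur->ptrs[0] <= LEAF_RECS)
		return 1;
	/* leaf exhausted: find a level with entries remaining */
	for (lev = 1; lev < NLEVELS; lev++)
		if (++cur->ptrs[lev] <= ROOT_RECS)
			break;
	if (lev == NLEVELS)
		return 0;		/* ran off the right edge of the tree */
	/* walk back down, starting at the first record of each new block */
	while (--lev >= 0)
		cur->ptrs[lev] = 1;
	return 1;
}

int main(void)
{
	struct toy_cur	cur = { .ptrs = { 1, 1 } };
	int		n = 1;

	while (toy_increment(&cur))
		n++;
	printf("visited %d records\n", n);	/* 3 leaves * 4 records = 12 */
	return 0;
}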
2028 /*
2029 * Insert the current record at the point referenced by cur.
2031 * A multi-level split of the tree on insert will invalidate the original
2032 * cursor. It appears, however, that some callers assume that the cursor is
2033 * always valid. Hence if we do a multi-level split we need to revalidate the
2034 * cursor.
2036 * When a split occurs, we will see a new cursor returned. Use that as a
2037 * trigger to determine if we need to revalidate the original cursor. If we get
2038 * a split, then use the original irec to look up the path of the record we
2039 * just inserted.
2041 * Note that the fact that the btree root is in the inode means that we can
2042 * have the level of the tree change without a "split" occurring at the root
2043 * level. What happens is that the root is migrated to an allocated block and
2044 * the inode root is pointed to it. This means a single split can change the
2045 * level of the tree (level 2 -> level 3) and invalidate the old cursor. Hence
2046 * the level change should be accounted as a split so as to correctly trigger a
2047 * revalidation of the old cursor.
2048 */
2049 int /* error */
2050 xfs_bmbt_insert(
2051 xfs_btree_cur_t *cur,
2052 int *stat) /* success/failure */
2053 {
2054 int error; /* error return value */
2055 int i;
2056 int level;
2057 xfs_fsblock_t nbno;
2058 xfs_btree_cur_t *ncur;
2059 xfs_bmbt_rec_t nrec;
2060 xfs_bmbt_irec_t oirec; /* original irec */
2061 xfs_btree_cur_t *pcur;
2062 int splits = 0;
2064 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
2065 level = 0;
2066 nbno = NULLFSBLOCK;
2067 oirec = cur->bc_rec.b;
2068 xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
2069 ncur = NULL;
2070 pcur = cur;
2071 do {
2072 if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur,
2073 &i))) {
2074 if (pcur != cur)
2075 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
2076 goto error0;
2077 }
2078 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
2079 if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) {
2080 /* allocating a new root is effectively a split */
2081 if (cur->bc_nlevels != pcur->bc_nlevels)
2082 splits++;
2083 cur->bc_nlevels = pcur->bc_nlevels;
2084 cur->bc_private.b.allocated +=
2085 pcur->bc_private.b.allocated;
2086 pcur->bc_private.b.allocated = 0;
2087 ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) ||
2088 XFS_IS_REALTIME_INODE(cur->bc_private.b.ip));
2089 cur->bc_private.b.firstblock =
2090 pcur->bc_private.b.firstblock;
2091 ASSERT(cur->bc_private.b.flist ==
2092 pcur->bc_private.b.flist);
2093 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
2094 }
2095 if (ncur) {
2096 splits++;
2097 pcur = ncur;
2098 ncur = NULL;
2099 }
2100 } while (nbno != NULLFSBLOCK);
2102 if (splits > 1) {
2103 /* revalidate the old cursor as we had a multi-level split */
2104 error = xfs_bmbt_lookup_eq(cur, oirec.br_startoff,
2105 oirec.br_startblock, oirec.br_blockcount, &i);
2106 if (error)
2107 goto error0;
2108 ASSERT(i == 1);
2109 }
2111 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2112 *stat = i;
2113 return 0;
2114 error0:
2115 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2116 return error;
2117 }
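/*
 * Illustrative stand-alone sketch (not from the XFS sources): it models only
 * the split-accounting idea described in the comment above xfs_bmbt_insert().
 * Propagate the insert upwards while each level keeps splitting, count how
 * many levels split, and treat "splits > 1" as the signal that the caller's
 * cursor path is stale and must be looked up again.  toy_insrec() and its
 * parameters are invented purely for the illustration.
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * Pretend insert at one level: levels below 'split_levels' split and push a
 * new block up to their parent; higher levels absorb the insert.
 */
static void toy_insrec(int level, int split_levels, bool *new_block)
{
	*new_block = (level < split_levels);
}

static void toy_insert(int split_levels)
{
	bool	new_block = true;
	int	level = 0;
	int	splits = 0;

	while (new_block) {
		toy_insrec(level++, split_levels, &new_block);
		if (new_block)
			splits++;	/* this level split; keep going up */
	}
	if (splits > 1)
		printf("%d levels split: revalidate the original cursor\n",
		       splits);
	else
		printf("%d level(s) split: cursor still valid\n", splits);
}

int main(void)
{
	toy_insert(0);		/* simple insert, no split */
	toy_insert(2);		/* multi-level split, cursor must be redone */
	return 0;
}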
2119 /*
2120 * Log fields from the btree block header.
2121 */
2122 void
2123 xfs_bmbt_log_block(
2124 xfs_btree_cur_t *cur,
2125 xfs_buf_t *bp,
2126 int fields)
2127 {
2128 int first;
2129 int last;
2130 xfs_trans_t *tp;
2131 static const short offsets[] = {
2132 offsetof(xfs_bmbt_block_t, bb_magic),
2133 offsetof(xfs_bmbt_block_t, bb_level),
2134 offsetof(xfs_bmbt_block_t, bb_numrecs),
2135 offsetof(xfs_bmbt_block_t, bb_leftsib),
2136 offsetof(xfs_bmbt_block_t, bb_rightsib),
2137 sizeof(xfs_bmbt_block_t)
2138 };
2140 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
2141 XFS_BMBT_TRACE_ARGBI(cur, bp, fields);
2142 tp = cur->bc_tp;
2143 if (bp) {
2144 xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first,
2145 &last);
2146 xfs_trans_log_buf(tp, bp, first, last);
2147 } else
2148 xfs_trans_log_inode(tp, cur->bc_private.b.ip,
2149 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
2150 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2151 }
2153 /*
2154 * Log record values from the btree block.
2155 */
2156 void
2157 xfs_bmbt_log_recs(
2158 xfs_btree_cur_t *cur,
2159 xfs_buf_t *bp,
2160 int rfirst,
2161 int rlast)
2162 {
2163 xfs_bmbt_block_t *block;
2164 int first;
2165 int last;
2166 xfs_bmbt_rec_t *rp;
2167 xfs_trans_t *tp;
2169 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
2170 XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast);
2171 ASSERT(bp);
2172 tp = cur->bc_tp;
2173 block = XFS_BUF_TO_BMBT_BLOCK(bp);
2174 rp = XFS_BMAP_REC_DADDR(block, 1, cur);
2175 first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
2176 last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
2177 xfs_trans_log_buf(tp, bp, first, last);
2178 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2179 }
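/*
 * Illustrative stand-alone sketch (not from the XFS sources):
 * xfs_bmbt_log_recs() converts a 1-based record range [rfirst, rlast] into a
 * byte range inside the buffer so the transaction code can log only the dirty
 * span.  The helper below reproduces that arithmetic for a generic array of
 * fixed-size records placed 'base' bytes into a block; the numbers in main()
 * are made up for the example.
 */
#include <stdio.h>

struct byte_range {
	int	first;	/* first dirty byte, inclusive */
	int	last;	/* last dirty byte, inclusive */
};

static struct byte_range rec_range(int base, int recsize, int rfirst, int rlast)
{
	struct byte_range r;

	r.first = base + (rfirst - 1) * recsize;	/* start of rfirst */
	r.last  = base + rlast * recsize - 1;		/* last byte of rlast */
	return r;
}

int main(void)
{
	/* e.g. 16-byte records starting 24 bytes into the block, log 2..3 */
	struct byte_range r = rec_range(24, 16, 2, 3);

	printf("log bytes %d..%d\n", r.first, r.last);	/* 40..71 */
	return 0;
}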
2181 int /* error */
2182 xfs_bmbt_lookup_eq(
2183 xfs_btree_cur_t *cur,
2184 xfs_fileoff_t off,
2185 xfs_fsblock_t bno,
2186 xfs_filblks_t len,
2187 int *stat) /* success/failure */
2188 {
2189 cur->bc_rec.b.br_startoff = off;
2190 cur->bc_rec.b.br_startblock = bno;
2191 cur->bc_rec.b.br_blockcount = len;
2192 return xfs_bmbt_lookup(cur, XFS_LOOKUP_EQ, stat);
2193 }
2195 int /* error */
2196 xfs_bmbt_lookup_ge(
2197 xfs_btree_cur_t *cur,
2198 xfs_fileoff_t off,
2199 xfs_fsblock_t bno,
2200 xfs_filblks_t len,
2201 int *stat) /* success/failure */
2202 {
2203 cur->bc_rec.b.br_startoff = off;
2204 cur->bc_rec.b.br_startblock = bno;
2205 cur->bc_rec.b.br_blockcount = len;
2206 return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat);
2207 }
2209 /*
2210 * Give the bmap btree a new root block. Copy the old broot contents
2211 * down into a real block and make the broot point to it.
2212 */
2213 int /* error */
2214 xfs_bmbt_newroot(
2215 xfs_btree_cur_t *cur, /* btree cursor */
2216 int *logflags, /* logging flags for inode */
2217 int *stat) /* return status - 0 fail */
2218 {
2219 xfs_alloc_arg_t args; /* allocation arguments */
2220 xfs_bmbt_block_t *block; /* bmap btree block */
2221 xfs_buf_t *bp; /* buffer for block */
2222 xfs_bmbt_block_t *cblock; /* child btree block */
2223 xfs_bmbt_key_t *ckp; /* child key pointer */
2224 xfs_bmbt_ptr_t *cpp; /* child ptr pointer */
2225 int error; /* error return code */
2226 #ifdef DEBUG
2227 int i; /* loop counter */
2228 #endif
2229 xfs_bmbt_key_t *kp; /* pointer to bmap btree key */
2230 int level; /* btree level */
2231 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
2233 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
2234 level = cur->bc_nlevels - 1;
2235 block = xfs_bmbt_get_block(cur, level, &bp);
2236 /*
2237 * Copy the root into a real block.
2238 */
2239 args.mp = cur->bc_mp;
2240 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
2241 args.tp = cur->bc_tp;
2242 args.fsbno = cur->bc_private.b.firstblock;
2243 args.mod = args.minleft = args.alignment = args.total = args.isfl =
2244 args.userdata = args.minalignslop = 0;
2245 args.minlen = args.maxlen = args.prod = 1;
2246 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
2247 args.firstblock = args.fsbno;
2248 if (args.fsbno == NULLFSBLOCK) {
2249 #ifdef DEBUG
2250 if ((error = xfs_btree_check_lptr_disk(cur, *pp, level))) {
2251 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2252 return error;
2253 }
2254 #endif
2255 args.fsbno = be64_to_cpu(*pp);
2256 args.type = XFS_ALLOCTYPE_START_BNO;
2257 } else
2258 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2259 if ((error = xfs_alloc_vextent(&args))) {
2260 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2261 return error;
2262 }
2263 if (args.fsbno == NULLFSBLOCK) {
2264 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2265 *stat = 0;
2266 return 0;
2267 }
2268 ASSERT(args.len == 1);
2269 cur->bc_private.b.firstblock = args.fsbno;
2270 cur->bc_private.b.allocated++;
2271 cur->bc_private.b.ip->i_d.di_nblocks++;
2272 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
2273 XFS_TRANS_DQ_BCOUNT, 1L);
2274 bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
2275 cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
2276 *cblock = *block;
2277 be16_add_cpu(&block->bb_level, 1);
2278 block->bb_numrecs = cpu_to_be16(1);
2279 cur->bc_nlevels++;
2280 cur->bc_ptrs[level + 1] = 1;
2281 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
2282 ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
2283 memcpy(ckp, kp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*kp));
2284 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
2285 #ifdef DEBUG
2286 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
2287 if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
2288 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2289 return error;
2290 }
2291 }
2292 #endif
2293 memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp));
2294 #ifdef DEBUG
2295 if ((error = xfs_btree_check_lptr(cur, args.fsbno, level))) {
2296 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2297 return error;
2298 }
2299 #endif
2300 *pp = cpu_to_be64(args.fsbno);
2301 xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs),
2302 cur->bc_private.b.whichfork);
2303 xfs_btree_setbuf(cur, level, bp);
2304 /*
2305 * Do all this logging at the end so that
2306 * the root is at the right level.
2307 */
2308 xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS);
2309 xfs_bmbt_log_keys(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
2310 xfs_bmbt_log_ptrs(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
2311 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2312 *logflags |=
2313 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork);
2314 *stat = 1;
2315 return 0;
2316 }
2318 /*
2319 * Set all the fields in a bmap extent record from the arguments.
2320 */
2321 void
2322 xfs_bmbt_set_allf(
2323 xfs_bmbt_rec_host_t *r,
2324 xfs_fileoff_t startoff,
2325 xfs_fsblock_t startblock,
2326 xfs_filblks_t blockcount,
2327 xfs_exntst_t state)
2328 {
2329 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
2331 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
2332 ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
2333 ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
2335 #if XFS_BIG_BLKNOS
2336 ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
2338 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2339 ((xfs_bmbt_rec_base_t)startoff << 9) |
2340 ((xfs_bmbt_rec_base_t)startblock >> 43);
2341 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
2342 ((xfs_bmbt_rec_base_t)blockcount &
2343 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
2344 #else /* !XFS_BIG_BLKNOS */
2345 if (ISNULLSTARTBLOCK(startblock)) {
2346 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2347 ((xfs_bmbt_rec_base_t)startoff << 9) |
2348 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
2349 r->l1 = XFS_MASK64HI(11) |
2350 ((xfs_bmbt_rec_base_t)startblock << 21) |
2351 ((xfs_bmbt_rec_base_t)blockcount &
2352 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
2353 } else {
2354 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2355 ((xfs_bmbt_rec_base_t)startoff << 9);
2356 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
2357 ((xfs_bmbt_rec_base_t)blockcount &
2358 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
2359 }
2360 #endif /* XFS_BIG_BLKNOS */
2361 }
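/*
 * Illustrative stand-alone sketch (not from the XFS sources): this is the
 * packing direction that xfs_bmbt_set_allf() implements in its XFS_BIG_BLKNOS
 * branch, written with plain C99 types so it can be compiled on its own and
 * round-tripped against the unpacking sketch shown earlier.  The field-width
 * checks follow the ASSERTs and masks used above; names are invented.
 */
#include <assert.h>
#include <stdint.h>

#define MASK64LO(n)	(((uint64_t)1 << (n)) - 1)

static void pack_rec(uint64_t startoff, uint64_t startblock,
		     uint64_t blockcount, int unwritten,
		     uint64_t *l0, uint64_t *l1)
{
	assert(startoff   <= MASK64LO(54));	/* 54-bit file offset */
	assert(startblock <= MASK64LO(52));	/* 52-bit block number */
	assert(blockcount <= MASK64LO(21));	/* 21-bit length */

	*l0 = ((uint64_t)(unwritten != 0) << 63) |
	      (startoff << 9) |
	      (startblock >> 43);		/* top 9 bits of startblock */
	*l1 = (startblock << 21) |		/* low 43 bits of startblock */
	      blockcount;
}

int main(void)
{
	uint64_t l0, l1;

	pack_rec(100, 2000, 8, 0, &l0, &l1);
	/* same words the sample in the earlier unpacking sketch used */
	assert(l0 == ((uint64_t)100 << 9));
	assert(l1 == (((uint64_t)2000 << 21) | 8));
	return 0;
}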
2363 /*
2364 * Set all the fields in a bmap extent record from the uncompressed form.
2365 */
2366 void
2367 xfs_bmbt_set_all(
2368 xfs_bmbt_rec_host_t *r,
2369 xfs_bmbt_irec_t *s)
2370 {
2371 xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
2372 s->br_blockcount, s->br_state);
2373 }
2376 /*
2377 * Set all the fields in a disk format bmap extent record from the arguments.
2378 */
2379 void
2380 xfs_bmbt_disk_set_allf(
2381 xfs_bmbt_rec_t *r,
2382 xfs_fileoff_t startoff,
2383 xfs_fsblock_t startblock,
2384 xfs_filblks_t blockcount,
2385 xfs_exntst_t state)
2386 {
2387 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
2389 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
2390 ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
2391 ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
2393 #if XFS_BIG_BLKNOS
2394 ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
2396 r->l0 = cpu_to_be64(
2397 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2398 ((xfs_bmbt_rec_base_t)startoff << 9) |
2399 ((xfs_bmbt_rec_base_t)startblock >> 43));
2400 r->l1 = cpu_to_be64(
2401 ((xfs_bmbt_rec_base_t)startblock << 21) |
2402 ((xfs_bmbt_rec_base_t)blockcount &
2403 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
2404 #else /* !XFS_BIG_BLKNOS */
2405 if (ISNULLSTARTBLOCK(startblock)) {
2406 r->l0 = cpu_to_be64(
2407 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2408 ((xfs_bmbt_rec_base_t)startoff << 9) |
2409 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
2410 r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
2411 ((xfs_bmbt_rec_base_t)startblock << 21) |
2412 ((xfs_bmbt_rec_base_t)blockcount &
2413 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
2414 } else {
2415 r->l0 = cpu_to_be64(
2416 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
2417 ((xfs_bmbt_rec_base_t)startoff << 9));
2418 r->l1 = cpu_to_be64(
2419 ((xfs_bmbt_rec_base_t)startblock << 21) |
2420 ((xfs_bmbt_rec_base_t)blockcount &
2421 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
2422 }
2423 #endif /* XFS_BIG_BLKNOS */
2424 }
2426 /*
2427 * Set all the fields in a disk format bmap extent record from the uncompressed form.
2428 */
2429 void
2430 xfs_bmbt_disk_set_all(
2431 xfs_bmbt_rec_t *r,
2432 xfs_bmbt_irec_t *s)
2433 {
2434 xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
2435 s->br_blockcount, s->br_state);
2436 }
2438 /*
2439 * Set the blockcount field in a bmap extent record.
2440 */
2441 void
2442 xfs_bmbt_set_blockcount(
2443 xfs_bmbt_rec_host_t *r,
2444 xfs_filblks_t v)
2445 {
2446 ASSERT((v & XFS_MASK64HI(43)) == 0);
2447 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
2448 (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
2449 }
2451 /*
2452 * Set the startblock field in a bmap extent record.
2453 */
2454 void
2455 xfs_bmbt_set_startblock(
2456 xfs_bmbt_rec_host_t *r,
2457 xfs_fsblock_t v)
2458 {
2459 #if XFS_BIG_BLKNOS
2460 ASSERT((v & XFS_MASK64HI(12)) == 0);
2461 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
2462 (xfs_bmbt_rec_base_t)(v >> 43);
2463 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
2464 (xfs_bmbt_rec_base_t)(v << 21);
2465 #else /* !XFS_BIG_BLKNOS */
2466 if (ISNULLSTARTBLOCK(v)) {
2467 r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
2468 r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
2469 ((xfs_bmbt_rec_base_t)v << 21) |
2470 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
2471 } else {
2472 r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
2473 r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
2474 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
2475 }
2476 #endif /* XFS_BIG_BLKNOS */
2477 }
2479 /*
2480 * Set the startoff field in a bmap extent record.
2481 */
2482 void
2483 xfs_bmbt_set_startoff(
2484 xfs_bmbt_rec_host_t *r,
2485 xfs_fileoff_t v)
2486 {
2487 ASSERT((v & XFS_MASK64HI(9)) == 0);
2488 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
2489 ((xfs_bmbt_rec_base_t)v << 9) |
2490 (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
2491 }
2493 /*
2494 * Set the extent state field in a bmap extent record.
2495 */
2496 void
2497 xfs_bmbt_set_state(
2498 xfs_bmbt_rec_host_t *r,
2499 xfs_exntst_t v)
2500 {
2501 ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
2502 if (v == XFS_EXT_NORM)
2503 r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
2504 else
2505 r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
2506 }
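/*
 * Illustrative stand-alone sketch (not from the XFS sources): the
 * set_blockcount/set_startoff/set_state helpers above all follow the same
 * read-modify-write pattern, clearing the field's bits with a mask and then
 * ORing in the new value shifted into place.  The generic helper below shows
 * that pattern for an arbitrary field; the positions used in main() match the
 * record layout sketched earlier (blockcount in bits 0-20 of l1, extent flag
 * in bit 63 of l0).  All names are invented.
 */
#include <assert.h>
#include <stdint.h>

/* replace 'width' bits starting at 'shift' in *word with 'val' */
static void set_field(uint64_t *word, int shift, int width, uint64_t val)
{
	uint64_t mask = (((uint64_t)1 << width) - 1) << shift;

	assert((val & ~(mask >> shift)) == 0);	/* value must fit the field */
	*word = (*word & ~mask) | (val << shift);
}

int main(void)
{
	uint64_t l0 = 0, l1 = 0;

	set_field(&l1, 0, 21, 8);	/* blockcount = 8 */
	set_field(&l0, 63, 1, 1);	/* mark the extent unwritten */
	set_field(&l1, 0, 21, 12);	/* grow blockcount without touching l0 */

	assert((l1 & 0x1fffff) == 12);
	assert((l0 >> 63) == 1);
	return 0;
}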
2508 /*
2509 * Convert in-memory form of btree root to on-disk form.
2510 */
2511 void
2512 xfs_bmbt_to_bmdr(
2513 xfs_bmbt_block_t *rblock,
2514 int rblocklen,
2515 xfs_bmdr_block_t *dblock,
2516 int dblocklen)
2517 {
2518 int dmxr;
2519 xfs_bmbt_key_t *fkp;
2520 __be64 *fpp;
2521 xfs_bmbt_key_t *tkp;
2522 __be64 *tpp;
2524 ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
2525 ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
2526 ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
2527 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
2528 dblock->bb_level = rblock->bb_level;
2529 dblock->bb_numrecs = rblock->bb_numrecs;
2530 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
2531 fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
2532 tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
2533 fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
2534 tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
2535 dmxr = be16_to_cpu(dblock->bb_numrecs);
2536 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
2537 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
2538 }
2540 /*
2541 * Update the record to the passed values.
2542 */
2543 int
2544 xfs_bmbt_update(
2545 xfs_btree_cur_t *cur,
2546 xfs_fileoff_t off,
2547 xfs_fsblock_t bno,
2548 xfs_filblks_t len,
2549 xfs_exntst_t state)
2550 {
2551 xfs_bmbt_block_t *block;
2552 xfs_buf_t *bp;
2553 int error;
2554 xfs_bmbt_key_t key;
2555 int ptr;
2556 xfs_bmbt_rec_t *rp;
2558 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
2559 XFS_BMBT_TRACE_ARGFFFI(cur, (xfs_dfiloff_t)off, (xfs_dfsbno_t)bno,
2560 (xfs_dfilblks_t)len, (int)state);
2561 block = xfs_bmbt_get_block(cur, 0, &bp);
2562 #ifdef DEBUG
2563 if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) {
2564 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2565 return error;
2566 }
2567 #endif
2568 ptr = cur->bc_ptrs[0];
2569 rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
2570 xfs_bmbt_disk_set_allf(rp, off, bno, len, state);
2571 xfs_bmbt_log_recs(cur, bp, ptr, ptr);
2572 if (ptr > 1) {
2573 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2574 return 0;
2575 }
2576 key.br_startoff = cpu_to_be64(off);
2577 if ((error = xfs_bmbt_updkey(cur, &key, 1))) {
2578 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2579 return error;
2580 }
2581 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2582 return 0;
2583 }
2585 /*
2586 * Check extent records, which have just been read, for
2587 * any bit in the extent flag field. ASSERT on debug
2588 * kernels, as this condition should not occur.
2589 * Return an error condition (1) if any flags found,
2590 * otherwise return 0.
2591 */
2593 int
2594 xfs_check_nostate_extents(
2595 xfs_ifork_t *ifp,
2596 xfs_extnum_t idx,
2597 xfs_extnum_t num)
2598 {
2599 for (; num > 0; num--, idx++) {
2600 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
2601 if ((ep->l0 >>
2602 (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
2603 ASSERT(0);
2604 return 1;
2605 }
2606 }
2607 return 0;
2608 }
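/*
 * Illustrative stand-alone sketch (not from the XFS sources):
 * xfs_check_nostate_extents() above simply scans freshly read extent records
 * and fails if any of them has the extent-state flag bit set.  The stand-in
 * below performs the same scan over a plain array of l0 words, returning 1
 * on the first record whose top flag bit is set; the sample values are
 * invented.
 */
#include <stdint.h>
#include <stdio.h>

#define EXNTFLAG_BITLEN	1	/* one state bit at the top of l0 */

static int check_nostate(const uint64_t *l0, int num)
{
	int i;

	for (i = 0; i < num; i++)
		if ((l0[i] >> (64 - EXNTFLAG_BITLEN)) != 0)
			return 1;	/* unexpected state flag found */
	return 0;
}

int main(void)
{
	uint64_t clean[2] = { 100 << 9, 200 << 9 };
	uint64_t dirty[1] = { ((uint64_t)1 << 63) | (300 << 9) };

	printf("clean: %d\n", check_nostate(clean, 2));	/* 0 */
	printf("dirty: %d\n", check_nostate(dirty, 1));	/* 1 */
	return 0;
}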