fs/xfs/xfs_ialloc_btree.c

/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"

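/*
 * Minimum number of records in an inobt block: index 0 of m_inobt_mnr is
 * used for leaf blocks, index 1 for interior node blocks.
 */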
STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mnr[level != 0];
}

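/*
 * Duplicate the cursor: build a new cursor for the same AGI buffer and AG
 * as the existing one.
 */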
STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno);
}

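/*
 * Point the AGI at a new btree root block, adjust the tree height by "inc",
 * and log the changed AGI fields.
 */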
STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

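/*
 * Allocate a single block for the btree, placed near the "start" hint and
 * within the cursor's AG.  *stat is set to 0 if no block was available.
 */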
STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;

	error = xfs_alloc_vextent(&args);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}
	if (args.fsbno == NULLFSBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

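/*
 * Return a single btree block to the free space and invalidate its buffer
 * in the current transaction.
 */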
STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	xfs_fsblock_t		fsbno;
	int			error;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_free_extent(cur->bc_tp, fsbno, 1);
	if (error)
		return error;

	xfs_trans_binval(cur->bc_tp, bp);
	return error;
}

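/*
 * Maximum number of records in an inobt block: index 0 of m_inobt_mxr is
 * used for leaf blocks, index 1 for interior node blocks.
 */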
STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mxr[level != 0];
}

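/*
 * The next three helpers copy the startino between on-disk keys and records,
 * and build an on-disk (big-endian) record from the cursor's in-memory record.
 */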
STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_rec_from_key(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = key->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	rec->inobt.ir_freecount = cpu_to_be32(cur->bc_rec.i.ir_freecount);
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

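/*
 * Return the startino in the given key minus the startino of the record held
 * in the cursor, as a signed 64-bit value; the generic btree code uses the
 * sign of the result to direct its search.
 */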
STATIC __int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (__int64_t)be32_to_cpu(key->inobt.ir_startino) -
			  cur->bc_rec.i.ir_startino;
}

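/*
 * Sanity-check the contents of an inobt block: magic number (plus the extra
 * metadata fields on CRC-enabled filesystems), tree level, record count and
 * sibling pointers.
 */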
static int
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
			return false;
		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
			return false;
		if (pag &&
		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_IBT_MAGIC):
		break;
	default:
		return 0;
	}

	/* numrecs and level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= mp->m_in_maxlevels)
		return false;
	if (be16_to_cpu(block->bb_numrecs) > mp->m_inobt_mxr[level != 0])
		return false;

	/* sibling pointer verification */
	if (!block->bb_u.s.bb_leftsib ||
	    (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
		return false;
	if (!block->bb_u.s.bb_rightsib ||
	    (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
		return false;

	return true;
}

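/*
 * Read verifier: check the CRC first, then the block contents, and flag any
 * failure on the buffer.
 */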
static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, EFSBADCRC);
	else if (!xfs_inobt_verify(bp))
		xfs_buf_ioerror(bp, EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

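/*
 * Write verifier: check the block contents before they go to disk and, if
 * they pass, recompute the CRC.
 */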
static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_inobt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

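/* Verifier operations attached to inode btree buffers. */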
const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
};

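/*
 * Debug-only ordering checks: keys must be strictly increasing, and records
 * must be at least one full inode chunk apart.
 */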
#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}
#endif	/* DEBUG */

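/* Operations vector wiring the generic btree code to the inode btree. */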
static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_rec_from_key	= xfs_inobt_init_rec_from_key,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
#endif
};

/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur *				/* new inode btree cursor */
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agi structure */
	xfs_agnumber_t		agno)		/* allocation group number */
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	cur->bc_btnum = XFS_BTNUM_INO;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	cur->bc_ops = &xfs_inobt_ops;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}
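
/*
 * Typical usage, as a minimal sketch (not part of this file): callers in the
 * inode allocation paths read the AGI, build a cursor, walk the tree, and
 * tear the cursor down again.  The calls below assume the usual helpers from
 * xfs_ialloc.c and xfs_btree.c at this point in the tree:
 *
 *	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
 *	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &stat);
 *	...
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */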