/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>

#include "trace_gfs2.h"
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif
/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */
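
/*
 * Illustrative example (not from the on-disk specification text): a
 * bitmap byte of 0xE4 (binary 11 10 01 00) describes four blocks, read
 * from the lowest bit-pair upwards: block 0 free (00), block 1 used
 * (01), block 2 an unlinked inode (10) and block 3 used metadata (11).
 */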
struct gfs2_extent {
	struct gfs2_rbm rbm;
	u32 len;
};

static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap);
/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	unsigned int buflen = bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = bi->bi_bh->b_data + bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
			rbm->offset, cur_state, new_state);
		pr_warn("rgrp=0x%llx bi_start=0x%x\n",
			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
		pr_warn("bi_offset=0x%x bi_len=0x%x\n",
			bi->bi_offset, bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && bi->bi_clone) {
		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}
/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 * @use_clone: If true, test the clone bitmap, not the official bitmap.
 *
 * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps,
 * not the "real" bitmaps, to avoid allocating recently freed blocks.
 *
 * Returns: The two bit block state of the requested bit
 */

static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
{
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	const u8 *buffer;
	const u8 *byte;
	unsigned int bit;

	if (use_clone && bi->bi_clone)
		buffer = bi->bi_clone;
	else
		buffer = bi->bi_bh->b_data;
	buffer += bi->bi_offset;
	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (*byte >> bit) & GFS2_BIT_MASK;
}
/**
 * gfs2_bit_search - search bitmap data for a given block state
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for, this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];
	tmp &= (tmp >> 1);
	tmp &= mask;
	return tmp;
}
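
/*
 * Worked example of the search trick above (illustrative): looking for
 * state 1 in the byte 0xE4 (bit-pairs 00, 01, 10, 11), we xor with the
 * per-pair pattern 0xaa: 0xE4 ^ 0xAA = 0x4E. Then 0x4E & (0x4E >> 1) =
 * 0x06, and masking with 0x55 leaves 0x04, i.e. only bit 2 set: the
 * bit-pair at offset 1 is the (only) match for state 1.
 */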
/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */

static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

	if (blk >= startblk + rs->rs_free)
		return 1;
	if (blk + len - 1 < startblk)
		return -1;
	return 0;
}
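
/*
 * Example (illustrative): for a reservation starting at block 1000 with
 * rs_free = 8 (covering blocks 1000..1007), rs_cmp(1008, 4, rs) == 1,
 * rs_cmp(990, 4, rs) == -1 and rs_cmp(1005, 4, rs) == 0.
 */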
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	u32 spoint = (goal << 1) & ((8 * sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	while (tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8 * (len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}
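
/*
 * Example of the start-point arithmetic above (illustrative): with
 * goal = 50, the search begins at u64 word 50 >> 5 = 1 (each word holds
 * 32 bit-pairs), and spoint = (50 << 1) & 63 = 36, so the initial mask
 * 0x5555... << 36 ignores the bit-pairs below block 50 in that word.
 */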
/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;

	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	rbm->bii = 0;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first block */
	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	return 0;
}
/**
 * gfs2_rbm_incr - increment an rbm structure
 * @rbm: The rbm with rgd already set correctly
 *
 * This function takes an existing rbm structure and increments it to the next
 * viable block offset.
 *
 * Returns: If incrementing the offset would cause the rbm to go past the
 *          end of the rgrp, true is returned, otherwise false.
 */

static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
{
	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
		rbm->offset++;
		return false;
	}
	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
		return true;

	rbm->offset = 0;
	rbm->bii++;
	return false;
}
/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */

static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
	u32 n;
	u8 res;

	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm, true);
		if (res != GFS2_BLKST_FREE)
			return true;
		(*len)--;
		if (*len == 0)
			return true;
		if (gfs2_rbm_incr(rbm))
			return true;
	}

	return false;
}
/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;
	struct gfs2_bitmap *bi;

	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		bi = rbm_bi(&rbm);
		start = bi->bi_bh->b_data;
		if (bi->bi_clone)
			start = bi->bi_clone;
		start += bi->bi_offset;
		end = start + bi->bi_len;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
			n_unaligned = 0;
			break;
		}
		if (ptr) {
			n_unaligned = 3;
			break;
		}
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;
}
/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
{
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}
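
/*
 * Example (illustrative): counting state 0 (free) in the byte 0x11
 * (bit-pairs 01, 00, 01, 00) increments count twice, via the 0x0C and
 * 0xC0 comparisons; the pre-shifted state1..state3 values let each
 * bit-pair be tested without unpacking the byte.
 */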
/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}
/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * The @exact argument should be set to true by most callers. The exception
 * is when we need to match blocks which are not represented by the rgrp
 * bitmap, but which are part of the rgrp (i.e. padding blocks) which are
 * there for alignment purposes. Another way of looking at it is that @exact
 * matches only valid data/metadata blocks, but with @exact false, it will
 * match any block within the extent of the rgrp.
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}
/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}
/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}
void check_and_update_goal(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
		ip->i_goal = ip->i_no_addr;
}
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int x;

	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
	}
}
/**
 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 *                   plus a quota allocation data structure, if necessary
 * @ip: the inode for this reservation
 */
int gfs2_rsqa_alloc(struct gfs2_inode *ip)
{
	return gfs2_qa_alloc(ip);
}
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);

	gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)ip->i_no_addr,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}
/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 */

static void __rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
				 rs->rs_free - 1;
		struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
		struct gfs2_bitmap *start, *last;

		/* return reserved blocks to the rgrp */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		/* The rgrp extent failure point is likely not to increase;
		   it will only do so if the freed blocks are somehow
		   contiguous with a span of free blocks that follows. Still,
		   it will force the number to be recalculated later. */
		rgd->rd_extfail_pt += rs->rs_free;
		rs->rs_free = 0;
		if (gfs2_rbm_from_block(&last_rbm, last_block))
			return;
		start = rbm_bi(&rs->rs_rbm);
		last = rbm_bi(&last_rbm);
		do
			clear_bit(GBF_FULL, &start->bi_flags);
		while (start++ != last);
	}
}
/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 */

void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
		__rs_deltree(rs);
		BUG_ON(rs->rs_free);
		spin_unlock(&rgd->rd_rsspin);
	}
}
/**
 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
 * @ip: The inode for this reservation
 * @wcount: The inode's write count, or NULL
 */

void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
	down_write(&ip->i_rw_mutex);
	if ((wcount == NULL) || (atomic_read(wcount) <= 1))
		gfs2_rs_deltree(&ip->i_res);
	up_write(&ip->i_rw_mutex);
	gfs2_qa_delete(ip, wcount);
}
/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			glock_clear_object(gl, rgd);
			gfs2_rgrp_brelse(rgd);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		kfree(rgd->rd_bits);
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	pr_info("ri_length = %u\n", rgd->rd_length);
	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	pr_info("ri_data = %u\n", rgd->rd_data);
	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	int error, rgrps;

	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}
static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}
/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error) {
		glock_set_object(rgd->rd_gl, rgd);
		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
						    rgd->rd_length) * bsize) - 1;
		return 0;
	}

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}
/**
 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
 * @sdp: the GFS2 superblock
 *
 * The purpose of this function is to select a subset of the resource groups
 * and mark them as PREFERRED. We do it in such a way that each node prefers
 * to use a unique set of rgrps to minimize glock contention.
 */
static void set_rgrp_preferences(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd, *first;
	int i;

	/* Skip an initial number of rgrps, based on this node's journal ID.
	   That should start each node out on its own set. */
	rgd = gfs2_rgrpd_get_first(sdp);
	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
		rgd = gfs2_rgrpd_get_next(rgd);
	first = rgd;

	do {
		rgd->rd_flags |= GFS2_RDF_PREFERRED;
		for (i = 0; i < sdp->sd_journals; i++) {
			rgd = gfs2_rgrpd_get_next(rgd);
			if (!rgd || rgd == first)
				break;
		}
	} while (rgd && rgd != first);
}
/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	do {
		error = read_rindex_entry(ip);
	} while (error == 0);

	if (error < 0)
		return error;

	set_rgrp_preferences(sdp);

	sdp->sd_rindex_uptodate = 1;
	return 0;
}
/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
	/* rd_data0, rd_data and rd_bitbytes already set from rindex */
}
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
	struct gfs2_rgrp *str = buf;
	u32 crc;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	if (next == NULL)
		str->rg_skip = 0;
	else if (next->rd_addr > rgd->rd_addr)
		str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	str->rg_data0 = cpu_to_be64(rgd->rd_data0);
	str->rg_data = cpu_to_be32(rgd->rd_data);
	str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
	str->rg_crc = 0;
	crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
	str->rg_crc = cpu_to_be32(crc);

	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
}
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;

	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		goal = 0;
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
 *
 * Returns: errno
 */

static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
		/* max out the rgrp allocation failure point */
		rgd->rd_extfail_pt = rgd->rd_free;
	}
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	}
	else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)){
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
		return 0;
	return gfs2_rgrp_bh_get(rgd);
}
/**
 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: The resource group
 */

void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
{
	int x, length = rgd->rd_length;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (bi->bi_bh) {
			brelse(bi->bi_bh);
			bi->bi_bh = NULL;
		}
	}
}
/**
 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
 * @gh: The glock holder for the resource group
 */

void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
		test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);

	if (rgd && demote_requested)
		gfs2_rgrp_brelse(rgd);
}
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 blk;
	sector_t start = 0;
	sector_t nr_blks = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		while (diff) {
			if (diff & 1) {
				if (nr_blks == 0)
					goto start_new_extent;
				if ((start + nr_blks) != blk) {
					if (nr_blks >= minlen) {
						rv = sb_issue_discard(sb,
							start, nr_blks,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_blks;
					}
					nr_blks = 0;
start_new_extent:
					start = blk;
				}
				nr_blks++;
			}
			diff >>= 2;
			blk++;
		}
	}
	if (nr_blks >= minlen) {
		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_blks;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}
/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */

int gfs2_fitrim(struct file *filp, void __user *argp)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	int ret = 0;
	u64 amt;
	u64 trimmed = 0;
	u64 start, end, minlen;
	unsigned int x;
	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&r, argp, sizeof(r)))
		return -EFAULT;

	ret = gfs2_rindex_update(sdp);
	if (ret)
		return ret;

	start = r.start >> bs_shift;
	end = start + (r.len >> bs_shift);
	minlen = max_t(u64, r.minlen,
		       q->limits.discard_granularity) >> bs_shift;

	if (end <= start || minlen > sdp->sd_max_rg_data)
		return -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, end, 0);

	if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
	    && (start > rgd_end->rd_data0 + rgd_end->rd_data))
		return -EINVAL; /* start is beyond the end of the fs */

	while (1) {

		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;

		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi, minlen,
						&amt);
				if (ret) {
					gfs2_glock_dq_uninit(&gh);
					goto out;
				}
				trimmed += amt;
			}

			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
			if (ret == 0) {
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_meta(rgd->rd_gl, bh);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_trans_end(sdp);
			}
		}
		gfs2_glock_dq_uninit(&gh);

		if (rgd == rgd_end)
			break;

		rgd = gfs2_rgrpd_get_next(rgd);
	}

out:
	r.len = trimmed << bs_shift;
	if (copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

	return ret;
}
/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 */

static void rs_insert(struct gfs2_inode *ip)
{
	struct rb_node **newn, *parent = NULL;
	int rc;
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

	BUG_ON(gfs2_rs_active(rs));

	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
	while (*newn) {
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);

		parent = *newn;
		rc = rs_cmp(fsblock, rs->rs_free, cur);
		if (rc > 0)
			newn = &((*newn)->rb_right);
		else if (rc < 0)
			newn = &((*newn)->rb_left);
		else {
			spin_unlock(&rgd->rd_rsspin);
			WARN_ON(1);
			return;
		}
	}

	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}
/**
 * rgd_free - return the number of free blocks we can allocate.
 * @rgd: the resource group
 *
 * This function returns the number of free blocks for an rgrp.
 * That's the clone-free blocks (blocks that are free, not including those
 * still being used for unlinked files that haven't been deleted).
 *
 * It also subtracts any blocks reserved by someone else, but does not
 * include free blocks that are still part of our current reservation,
 * because obviously we can (and will) allocate them.
 */
static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
{
	u32 tot_reserved, tot_free;

	if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
		return 0;
	tot_reserved = rgd->rd_reserved - rs->rs_free;

	if (rgd->rd_free_clone < tot_reserved)
		tot_reserved = 0;

	tot_free = rgd->rd_free_clone - tot_reserved;

	return tot_free;
}
/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @ap: the allocation parameters
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   const struct gfs2_alloc_parms *ap)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = &ip->i_res;
	u32 extlen;
	u32 free_blocks = rgd_free(rgd, rs);
	int ret;
	struct inode *inode = &ip->i_inode;

	if (S_ISDIR(inode->i_mode))
		extlen = 1;
	else {
		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	}
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
	if (ret == 0) {
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs_insert(ip);
	} else {
		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
			rgd->rd_last_alloc = 0;
	}
}
/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
		while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			n = n->rb_right;
			if (n == NULL)
				break;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		}
	}

	spin_unlock(&rgd->rd_rsspin);
	return block;
}
/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 * @maxext: A pointer to the maximum extent structure
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext,
					     struct gfs2_extent *maxext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		if (extlen <= maxext->len)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block) {
		if (!minext || extlen >= minext)
			return 0;

		if (extlen > maxext->len) {
			maxext->len = extlen;
			maxext->rbm = *rbm;
		}
fail:
		nblock = block + extlen;
	}
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}
/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: Pointer to the requested extent length (NULL for a single block)
 *          This is updated to be the actual reservation size.
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
 *   has come up short on a free block search.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	int initial_bii;
	u32 initial_offset;
	int first_bii = rbm->bii;
	u32 first_offset = rbm->offset;
	u32 offset;
	u8 *buffer;
	int n = 0;
	int iters = rbm->rgd->rd_length;
	int ret;
	struct gfs2_bitmap *bi;
	struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while (1) {
		bi = rbm_bi(rbm);
		if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
		    test_bit(GBF_FULL, &bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = bi->bi_bh;
		buffer = bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		initial_bii = rbm->bii;
		ret = gfs2_reservation_check_and_update(rbm, ip,
							minext ? *minext : 0,
							&maxext);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			n += (rbm->bii - initial_bii);
			goto next_iter;
		}
		if (ret == -E2BIG) {
			rbm->bii = 0;
			rbm->offset = 0;
			n += (rbm->bii - initial_bii);
			goto res_covered_end_of_rgrp;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		rbm->bii++;
		if (rbm->bii == rbm->rgd->rd_length)
			rbm->bii = 0;
res_covered_end_of_rgrp:
		if ((rbm->bii == 0) && nowrap)
			break;
		n++;
next_iter:
		if (n >= iters)
			break;
	}

	if (minext == NULL || state != GFS2_BLKST_FREE)
		return -ENOSPC;

	/* If the extent was too small, and it's smaller than the smallest
	   to have failed before, remember for future reference that it's
	   useless to search this rgrp again for this amount or more. */
	if ((first_offset == 0) && (first_bii == 0) &&
	    (*minext < rbm->rgd->rd_extfail_pt))
		rbm->rgd->rd_extfail_pt = *minext;

	/* If the maximum extent we found is big enough to fulfill the
	   minimum requirements, use it anyway. */
	if (maxext.len) {
		*rbm = maxext.rbm;
		*minext = maxext.len;
		return 0;
	}

	return -ENOSPC;
}
/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Returns: 0 if no error
 *          The inode, if one has been found, in inode.
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };

	while (1) {
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
				      true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}
/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not land up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple, we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */

static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
{
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_lkstats *st;
	u64 r_dcount, l_dcount;
	u64 l_srttb, a_srttb = 0;
	s64 srttb_diff;
	u64 sqr_diff;
	u64 var;
	int cpu, nonzero = 0;

	preempt_disable();
	for_each_present_cpu(cpu) {
		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
		if (st->stats[GFS2_LKS_SRTTB]) {
			a_srttb += st->stats[GFS2_LKS_SRTTB];
			nonzero++;
		}
	}
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
	if (nonzero)
		do_div(a_srttb, nonzero);
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	preempt_enable();

	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];

	if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
		return false;

	srttb_diff = a_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;

	var *= 2;
	if (l_dcount < 8 || r_dcount < 8)
		var *= 2;
	if (loops == 1)
		var *= 2;

	return ((srttb_diff < 0) && (sqr_diff > var));
}
/**
 * gfs2_rgrp_used_recently
 * @rs: The block reservation with the rgrp to test
 * @msecs: The time limit in milliseconds
 *
 * Returns: True if the rgrp glock has been used within the time limit
 */
static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
				    u64 msecs)
{
	u64 tdiff;

	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
			    rs->rs_rbm.rgd->rd_gl->gl_dstamp));

	return tdiff > (msecs * 1000 * 1000);
}
static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 skip;

	get_random_bytes(&skip, sizeof(skip));
	return skip % sdp->sd_rgrps;
}
static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
{
	struct gfs2_rgrpd *rgd = *pos;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	rgd = gfs2_rgrpd_get_next(rgd);
	if (rgd == NULL)
		rgd = gfs2_rgrpd_get_first(sdp);
	*pos = rgd;
	if (rgd != begin) /* If we didn't wrap */
		return true;
	return false;
}
/**
 * fast_to_acquire - determine if a resource group will be fast to acquire
 * @rgd: The rgrp
 *
 * If this is one of our preferred rgrps, it should be quicker to acquire,
 * because we tried to set ourselves up as dlm lock master.
 */
static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
{
	struct gfs2_glock *gl = rgd->rd_gl;

	if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
	    !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		return 1;
	if (rgd->rd_flags & GFS2_RDF_PREFERRED)
		return 1;
	return 0;
}
/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @ap: the allocation parameters
 *
 * We try our best to find an rgrp that has at least ap->target blocks
 * available. After a couple of passes (loops == 2), the prospects of finding
 * such an rgrp diminish. At this stage, we return the first rgrp that has
 * at least ap->min_target blocks available. Either way, we set ap->allowed to
 * the number of blocks available in the chosen rgrp.
 *
 * Returns: 0 on success,
 *          -ENOMEM if a suitable rgrp can't be found
 *          errno otherwise
 */

int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = &ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;
	u32 free_blocks, skip = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, ap->target))
		return -EINVAL;
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
	} else if (rs->rs_rbm.rgd &&
		   rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
		begin = rs->rs_rbm.rgd;
	} else {
		check_and_update_goal(ip);
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 1;

		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 0;
			if (skip && skip--)
				goto next_rgrp;
			if (!gfs2_rs_active(rs)) {
				if (loops == 0 &&
				    !fast_to_acquire(rs->rs_rbm.rgd))
					goto next_rgrp;
				if ((loops < 2) &&
				    gfs2_rgrp_used_recently(rs, 1000) &&
				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
					goto next_rgrp;
			}
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (unlikely(error))
				return error;
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto skip_rgrp;
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}

		/* Skip unusable resource groups */
		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
						 GFS2_RDF_ERROR)) ||
		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
			goto skip_rgrp;

		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);

		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
			goto check_rgrp;

		/* If rgrp has enough free space, use it */
		free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
		if (free_blocks >= ap->target ||
		    (loops == 2 && ap->min_target &&
		     free_blocks >= ap->min_target)) {
			ap->allowed = free_blocks;
			return 0;
		}
check_rgrp:
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
					ip->i_no_addr);
skip_rgrp:
		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(rs);

		/* Unlock rgrp if required */
		if (!rg_locked)
			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
next_rgrp:
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
			continue;
		if (skip)
			continue;

		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		 * trying again.
		 */
		loops++;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				return error;
		}
		/* Flushing the log may release space */
		if (loops == 2)
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_INPLACE_RESERVE);
	}

	return -ENOSPC;
}
/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = &ip->i_res;

	if (gfs2_holder_initialized(&rs->rs_rgd_gh))
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}
/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 *
 * Add the bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 */
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			      unsigned int *n)
{
	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
	const unsigned int elen = *n;
	u64 block;
	int ret;

	*n = 1;
	block = gfs2_rbm_to_block(rbm);
	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	block++;
	while (*n < elen) {
		ret = gfs2_rbm_from_block(&pos, block);
		if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE)
			break;
		gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
		(*n)++;
		block++;
	}
}
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;
	struct gfs2_bitmap *bi, *bi_prev = NULL;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	gfs2_rbm_from_block(&rbm, bstart);
	while (blen--) {
		bi = rbm_bi(&rbm);
		if (bi != bi_prev) {
			if (!bi->bi_clone) {
				bi->bi_clone = kmalloc(bi->bi_bh->b_size,
						       GFP_NOFS | __GFP_NOFAIL);
				memcpy(bi->bi_clone + bi->bi_offset,
				       bi->bi_bh->b_data + bi->bi_offset,
				       bi->bi_len);
			}
			gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
			bi_prev = bi;
		}
		gfs2_setbit(&rbm, false, new_state);
		gfs2_rbm_incr(&rbm);
	}

	return rbm.rgd;
}
/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 */

void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved, rgd->rd_extfail_pt);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
}
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}
/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */

static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
			/* We used up our block reservation, so we should
			   reserve more blocks next time. */
			atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
		}
		__rs_deltree(rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}
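/*
 * Worked example (illustrative numbers): with an active reservation of
 * rs_free == 10 starting exactly at @rbm, an allocation of len == 4
 * advances rs_rbm by four blocks and leaves rs_free == 6, with
 * rd_reserved reduced by the same amount; the reservation survives.
 * With len == 12, rlen is capped at 10, rs_free reaches zero, the size
 * hint is bumped by RGRP_RSRV_ADDBLKS, and __rs_deltree() removes the
 * now-empty reservation.
 */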
/**
 * gfs2_set_alloc_start - Set starting point for block allocation
 * @rbm: The rbm which will be set to the required location
 * @ip: The gfs2 inode
 * @dinode: Flag to say if allocation includes a new inode
 *
 * This sets the starting point from the reservation if one is active
 * otherwise it falls back to guessing a start point based on the
 * inode's goal block or the last allocation point in the rgrp.
 */

static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
				 const struct gfs2_inode *ip, bool dinode)
{
	u64 goal;

	if (gfs2_rs_active(&ip->i_res)) {
		*rbm = ip->i_res.rs_rbm;
		return;
	}

	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;

	gfs2_rbm_from_block(rbm, goal);
}
/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */

int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
	unsigned int ndata;
	u64 block; /* block, within the file system scope */
	int error;

	gfs2_set_alloc_start(&rbm, ip, dinode);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);

	if (error == -ENOSPC) {
		gfs2_set_alloc_start(&rbm, ip, dinode);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
			rbm.rgd->rd_extfail_pt);
		goto rgrp_error;
	}

	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(&ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_meta(ip->i_gl, dibh);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		pr_warn("nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, *nblocks);

	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
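/*
 * Example caller (a minimal sketch, not a verbatim call site; variable
 * names are illustrative).  For a plain data or metadata allocation the
 * @generation pointer may be NULL, since it is only written when a
 * dinode is allocated:
 *
 *	u64 bn;
 *	unsigned int n = requested;	// may be trimmed to the extent found
 *
 *	error = gfs2_alloc_blocks(ip, &bn, &n, false, NULL);
 *	if (error)
 *		return error;
 *	// bn holds the first block of the extent, n its actual length
 */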
/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */

void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
}
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}
/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */

int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	struct gfs2_rbm rbm;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	rbm.rgd = rgd;
	error = gfs2_rbm_from_block(&rbm, no_addr);
	WARN_ON_ONCE(error != 0);

	if (gfs2_testbit(&rbm, false) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}
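/*
 * Example (sketch): lookup-by-number paths use this to confirm that a
 * candidate block is still a dinode before instantiating an inode from it:
 *
 *	error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_DINODE);
 *	if (error)	// -ESTALE here means the block has been reused
 *		goto fail;
 */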
/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	/*
	 * The resource group last accessed is kept in the last position.
	 */

	if (rlist->rl_rgrps) {
		rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
		if (rgrp_contains_block(rgd, block))
			return;
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	} else {
		rgd = ip->i_res.rs_rbm.rgd;
		if (!rgd || !rgrp_contains_block(rgd, block))
			rgd = gfs2_blk2rgrpd(sdp, block, 1);
	}

	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
		       (unsigned long long)block);
		return;
	}

	for (x = 0; x < rlist->rl_rgrps; x++) {
		if (rlist->rl_rgd[x] == rgd) {
			swap(rlist->rl_rgd[x],
			     rlist->rl_rgd[rlist->rl_rgrps - 1]);
			return;
		}
	}

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}
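/*
 * Usage note: because the most recently used rgrp sits in the last slot,
 * a caller feeding in the blocks of a file that rarely crosses an rgrp
 * boundary takes the early return above almost every time; the linear
 * duplicate scan and the grow-by-10 reallocation only run when the rgrp
 * actually changes.
 */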
/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *      and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps,
				      sizeof(struct gfs2_holder),
				      GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, 0,
				 &rlist->rl_ghs[x]);
}
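/*
 * Lifecycle sketch (illustrative): deallocation paths such as directory
 * leaf and xattr teardown collect every rgrp touched, lock them all in
 * one batch, do the frees, and then tear the list down:
 *
 *	gfs2_rlist_add(ip, &rlist, block);	// once per block to free
 *	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 *	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	// ... free the blocks ...
 *	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	gfs2_rlist_free(&rlist);
 */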
/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;