1 // SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */
13 #include <linux/stat.h>
14 #include <linux/time.h>
15 #include <linux/string.h>
16 #include <linux/buffer_head.h>
17 #include <linux/capability.h>
18 #include <linux/bitops.h>
19 #include <linux/bio.h>
20 #include <asm/byteorder.h>
27 #define INVBLOCK ((u64)-1L)
29 static u64
ufs_add_fragments(struct inode
*, u64
, unsigned, unsigned);
30 static u64
ufs_alloc_fragments(struct inode
*, unsigned, u64
, unsigned, int *);
31 static u64
ufs_alloccg_block(struct inode
*, struct ufs_cg_private_info
*, u64
, int *);
32 static u64
ufs_bitmap_search (struct super_block
*, struct ufs_cg_private_info
*, u64
, unsigned);
33 static unsigned char ufs_fragtable_8fpb
[], ufs_fragtable_other
[];
34 static void ufs_clusteracct(struct super_block
*, struct ufs_cg_private_info
*, unsigned, int);
37 * Free 'count' fragments from fragment number 'fragment'
39 void ufs_free_fragments(struct inode
*inode
, u64 fragment
, unsigned count
)
41 struct super_block
* sb
;
42 struct ufs_sb_private_info
* uspi
;
43 struct ufs_cg_private_info
* ucpi
;
44 struct ufs_cylinder_group
* ucg
;
45 unsigned cgno
, bit
, end_bit
, bbase
, blkmap
, i
;
49 uspi
= UFS_SB(sb
)->s_uspi
;
51 UFSD("ENTER, fragment %llu, count %u\n",
52 (unsigned long long)fragment
, count
);
54 if (ufs_fragnum(fragment
) + count
> uspi
->s_fpg
)
55 ufs_error (sb
, "ufs_free_fragments", "internal error");
57 mutex_lock(&UFS_SB(sb
)->s_lock
);
59 cgno
= ufs_dtog(uspi
, fragment
);
60 bit
= ufs_dtogd(uspi
, fragment
);
61 if (cgno
>= uspi
->s_ncg
) {
62 ufs_panic (sb
, "ufs_free_fragments", "freeing blocks are outside device");
66 ucpi
= ufs_load_cylinder (sb
, cgno
);
69 ucg
= ubh_get_ucg (UCPI_UBH(ucpi
));
70 if (!ufs_cg_chkmagic(sb
, ucg
)) {
71 ufs_panic (sb
, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno
);
75 end_bit
= bit
+ count
;
76 bbase
= ufs_blknum (bit
);
77 blkmap
= ubh_blkmap (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, bbase
);
78 ufs_fragacct (sb
, blkmap
, ucg
->cg_frsum
, -1);
79 for (i
= bit
; i
< end_bit
; i
++) {
80 if (ubh_isclr (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, i
))
81 ubh_setbit (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, i
);
83 ufs_error (sb
, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i
);
87 inode_sub_bytes(inode
, count
<< uspi
->s_fshift
);
88 fs32_add(sb
, &ucg
->cg_cs
.cs_nffree
, count
);
89 uspi
->cs_total
.cs_nffree
+= count
;
90 fs32_add(sb
, &UFS_SB(sb
)->fs_cs(cgno
).cs_nffree
, count
);
91 blkmap
= ubh_blkmap (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, bbase
);
92 ufs_fragacct(sb
, blkmap
, ucg
->cg_frsum
, 1);
95 * Trying to reassemble free fragments into block
97 blkno
= ufs_fragstoblks (bbase
);
98 if (ubh_isblockset(UCPI_UBH(ucpi
), ucpi
->c_freeoff
, blkno
)) {
99 fs32_sub(sb
, &ucg
->cg_cs
.cs_nffree
, uspi
->s_fpb
);
100 uspi
->cs_total
.cs_nffree
-= uspi
->s_fpb
;
101 fs32_sub(sb
, &UFS_SB(sb
)->fs_cs(cgno
).cs_nffree
, uspi
->s_fpb
);
102 if ((UFS_SB(sb
)->s_flags
& UFS_CG_MASK
) == UFS_CG_44BSD
)
103 ufs_clusteracct (sb
, ucpi
, blkno
, 1);
104 fs32_add(sb
, &ucg
->cg_cs
.cs_nbfree
, 1);
105 uspi
->cs_total
.cs_nbfree
++;
106 fs32_add(sb
, &UFS_SB(sb
)->fs_cs(cgno
).cs_nbfree
, 1);
107 if (uspi
->fs_magic
!= UFS2_MAGIC
) {
108 unsigned cylno
= ufs_cbtocylno (bbase
);
110 fs16_add(sb
, &ubh_cg_blks(ucpi
, cylno
,
111 ufs_cbtorpos(bbase
)), 1);
112 fs32_add(sb
, &ubh_cg_blktot(ucpi
, cylno
), 1);
116 ubh_mark_buffer_dirty (USPI_UBH(uspi
));
117 ubh_mark_buffer_dirty (UCPI_UBH(ucpi
));
118 if (sb
->s_flags
& SB_SYNCHRONOUS
)
119 ubh_sync_block(UCPI_UBH(ucpi
));
120 ufs_mark_sb_dirty(sb
);
122 mutex_unlock(&UFS_SB(sb
)->s_lock
);
127 mutex_unlock(&UFS_SB(sb
)->s_lock
);
128 UFSD("EXIT (FAILED)\n");
133 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
135 void ufs_free_blocks(struct inode
*inode
, u64 fragment
, unsigned count
)
137 struct super_block
* sb
;
138 struct ufs_sb_private_info
* uspi
;
139 struct ufs_cg_private_info
* ucpi
;
140 struct ufs_cylinder_group
* ucg
;
141 unsigned overflow
, cgno
, bit
, end_bit
, i
;
145 uspi
= UFS_SB(sb
)->s_uspi
;
147 UFSD("ENTER, fragment %llu, count %u\n",
148 (unsigned long long)fragment
, count
);
150 if ((fragment
& uspi
->s_fpbmask
) || (count
& uspi
->s_fpbmask
)) {
151 ufs_error (sb
, "ufs_free_blocks", "internal error, "
152 "fragment %llu, count %u\n",
153 (unsigned long long)fragment
, count
);
157 mutex_lock(&UFS_SB(sb
)->s_lock
);
161 cgno
= ufs_dtog(uspi
, fragment
);
162 bit
= ufs_dtogd(uspi
, fragment
);
163 if (cgno
>= uspi
->s_ncg
) {
164 ufs_panic (sb
, "ufs_free_blocks", "freeing blocks are outside device");
167 end_bit
= bit
+ count
;
168 if (end_bit
> uspi
->s_fpg
) {
169 overflow
= bit
+ count
- uspi
->s_fpg
;
174 ucpi
= ufs_load_cylinder (sb
, cgno
);
177 ucg
= ubh_get_ucg (UCPI_UBH(ucpi
));
178 if (!ufs_cg_chkmagic(sb
, ucg
)) {
179 ufs_panic (sb
, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno
);
183 for (i
= bit
; i
< end_bit
; i
+= uspi
->s_fpb
) {
184 blkno
= ufs_fragstoblks(i
);
185 if (ubh_isblockset(UCPI_UBH(ucpi
), ucpi
->c_freeoff
, blkno
)) {
186 ufs_error(sb
, "ufs_free_blocks", "freeing free fragment");
188 ubh_setblock(UCPI_UBH(ucpi
), ucpi
->c_freeoff
, blkno
);
189 inode_sub_bytes(inode
, uspi
->s_fpb
<< uspi
->s_fshift
);
190 if ((UFS_SB(sb
)->s_flags
& UFS_CG_MASK
) == UFS_CG_44BSD
)
191 ufs_clusteracct (sb
, ucpi
, blkno
, 1);
193 fs32_add(sb
, &ucg
->cg_cs
.cs_nbfree
, 1);
194 uspi
->cs_total
.cs_nbfree
++;
195 fs32_add(sb
, &UFS_SB(sb
)->fs_cs(cgno
).cs_nbfree
, 1);
197 if (uspi
->fs_magic
!= UFS2_MAGIC
) {
198 unsigned cylno
= ufs_cbtocylno(i
);
200 fs16_add(sb
, &ubh_cg_blks(ucpi
, cylno
,
201 ufs_cbtorpos(i
)), 1);
202 fs32_add(sb
, &ubh_cg_blktot(ucpi
, cylno
), 1);
206 ubh_mark_buffer_dirty (USPI_UBH(uspi
));
207 ubh_mark_buffer_dirty (UCPI_UBH(ucpi
));
208 if (sb
->s_flags
& SB_SYNCHRONOUS
)
209 ubh_sync_block(UCPI_UBH(ucpi
));
217 ufs_mark_sb_dirty(sb
);
218 mutex_unlock(&UFS_SB(sb
)->s_lock
);
223 mutex_unlock(&UFS_SB(sb
)->s_lock
);
225 UFSD("EXIT (FAILED)\n");
230 * Modify inode page cache in such way:
231 * have - blocks with b_blocknr equal to oldb...oldb+count-1
232 * get - blocks with b_blocknr equal to newb...newb+count-1
233 * also we suppose that oldb...oldb+count-1 blocks
234 * situated at the end of file.
236 * We can come here from ufs_writepage or ufs_prepare_write,
237 * locked_page is argument of these functions, so we already lock it.
239 static void ufs_change_blocknr(struct inode
*inode
, sector_t beg
,
240 unsigned int count
, sector_t oldb
,
241 sector_t newb
, struct page
*locked_page
)
243 const unsigned blks_per_page
=
244 1 << (PAGE_SHIFT
- inode
->i_blkbits
);
245 const unsigned mask
= blks_per_page
- 1;
246 struct address_space
* const mapping
= inode
->i_mapping
;
247 pgoff_t index
, cur_index
, last_index
;
248 unsigned pos
, j
, lblock
;
251 struct buffer_head
*head
, *bh
;
253 UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
255 (unsigned long long)oldb
, (unsigned long long)newb
);
257 BUG_ON(!locked_page
);
258 BUG_ON(!PageLocked(locked_page
));
260 cur_index
= locked_page
->index
;
262 last_index
= end
>> (PAGE_SHIFT
- inode
->i_blkbits
);
263 for (i
= beg
; i
< end
; i
= (i
| mask
) + 1) {
264 index
= i
>> (PAGE_SHIFT
- inode
->i_blkbits
);
266 if (likely(cur_index
!= index
)) {
267 page
= ufs_get_locked_page(mapping
, index
);
268 if (!page
)/* it was truncated */
270 if (IS_ERR(page
)) {/* or EIO */
271 ufs_error(inode
->i_sb
, __func__
,
272 "read of page %llu failed\n",
273 (unsigned long long)index
);
279 head
= page_buffers(page
);
282 for (j
= 0; j
< pos
; ++j
)
283 bh
= bh
->b_this_page
;
286 if (unlikely(index
== last_index
))
289 lblock
= blks_per_page
;
296 if (!buffer_mapped(bh
))
297 map_bh(bh
, inode
->i_sb
, oldb
+ pos
);
298 if (!buffer_uptodate(bh
)) {
299 ll_rw_block(REQ_OP_READ
, 0, 1, &bh
);
301 if (!buffer_uptodate(bh
)) {
302 ufs_error(inode
->i_sb
, __func__
,
303 "read of block failed\n");
308 UFSD(" change from %llu to %llu, pos %u\n",
309 (unsigned long long)(pos
+ oldb
),
310 (unsigned long long)(pos
+ newb
), pos
);
312 bh
->b_blocknr
= newb
+ pos
;
313 clean_bdev_bh_alias(bh
);
314 mark_buffer_dirty(bh
);
316 bh
= bh
->b_this_page
;
317 } while (bh
!= head
);
319 if (likely(cur_index
!= index
))
320 ufs_put_locked_page(page
);
325 static void ufs_clear_frags(struct inode
*inode
, sector_t beg
, unsigned int n
,
328 struct buffer_head
*bh
;
329 sector_t end
= beg
+ n
;
331 for (; beg
< end
; ++beg
) {
332 bh
= sb_getblk(inode
->i_sb
, beg
);
334 memset(bh
->b_data
, 0, inode
->i_sb
->s_blocksize
);
335 set_buffer_uptodate(bh
);
336 mark_buffer_dirty(bh
);
338 if (IS_SYNC(inode
) || sync
)
339 sync_dirty_buffer(bh
);
344 u64
ufs_new_fragments(struct inode
*inode
, void *p
, u64 fragment
,
345 u64 goal
, unsigned count
, int *err
,
346 struct page
*locked_page
)
348 struct super_block
* sb
;
349 struct ufs_sb_private_info
* uspi
;
350 struct ufs_super_block_first
* usb1
;
351 unsigned cgno
, oldcount
, newcount
;
352 u64 tmp
, request
, result
;
354 UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
355 inode
->i_ino
, (unsigned long long)fragment
,
356 (unsigned long long)goal
, count
);
359 uspi
= UFS_SB(sb
)->s_uspi
;
360 usb1
= ubh_get_usb_first(uspi
);
363 mutex_lock(&UFS_SB(sb
)->s_lock
);
364 tmp
= ufs_data_ptr_to_cpu(sb
, p
);
366 if (count
+ ufs_fragnum(fragment
) > uspi
->s_fpb
) {
367 ufs_warning(sb
, "ufs_new_fragments", "internal warning"
368 " fragment %llu, count %u",
369 (unsigned long long)fragment
, count
);
370 count
= uspi
->s_fpb
- ufs_fragnum(fragment
);
372 oldcount
= ufs_fragnum (fragment
);
373 newcount
= oldcount
+ count
;
376 * Somebody else has just allocated our fragments
380 ufs_error(sb
, "ufs_new_fragments", "internal error, "
381 "fragment %llu, tmp %llu\n",
382 (unsigned long long)fragment
,
383 (unsigned long long)tmp
);
384 mutex_unlock(&UFS_SB(sb
)->s_lock
);
387 if (fragment
< UFS_I(inode
)->i_lastfrag
) {
388 UFSD("EXIT (ALREADY ALLOCATED)\n");
389 mutex_unlock(&UFS_SB(sb
)->s_lock
);
395 UFSD("EXIT (ALREADY ALLOCATED)\n");
396 mutex_unlock(&UFS_SB(sb
)->s_lock
);
402 * There is not enough space for user on the device
404 if (unlikely(ufs_freefrags(uspi
) <= uspi
->s_root_blocks
)) {
405 if (!capable(CAP_SYS_RESOURCE
)) {
406 mutex_unlock(&UFS_SB(sb
)->s_lock
);
407 UFSD("EXIT (FAILED)\n");
412 if (goal
>= uspi
->s_size
)
415 cgno
= ufs_inotocg (inode
->i_ino
);
417 cgno
= ufs_dtog(uspi
, goal
);
420 * allocate new fragment
423 result
= ufs_alloc_fragments (inode
, cgno
, goal
, count
, err
);
425 ufs_clear_frags(inode
, result
+ oldcount
,
426 newcount
- oldcount
, locked_page
!= NULL
);
428 write_seqlock(&UFS_I(inode
)->meta_lock
);
429 ufs_cpu_to_data_ptr(sb
, p
, result
);
430 UFS_I(inode
)->i_lastfrag
=
431 max(UFS_I(inode
)->i_lastfrag
, fragment
+ count
);
432 write_sequnlock(&UFS_I(inode
)->meta_lock
);
434 mutex_unlock(&UFS_SB(sb
)->s_lock
);
435 UFSD("EXIT, result %llu\n", (unsigned long long)result
);
442 result
= ufs_add_fragments(inode
, tmp
, oldcount
, newcount
);
445 read_seqlock_excl(&UFS_I(inode
)->meta_lock
);
446 UFS_I(inode
)->i_lastfrag
= max(UFS_I(inode
)->i_lastfrag
,
448 read_sequnlock_excl(&UFS_I(inode
)->meta_lock
);
449 ufs_clear_frags(inode
, result
+ oldcount
, newcount
- oldcount
,
450 locked_page
!= NULL
);
451 mutex_unlock(&UFS_SB(sb
)->s_lock
);
452 UFSD("EXIT, result %llu\n", (unsigned long long)result
);
457 * allocate new block and move data
459 if (fs32_to_cpu(sb
, usb1
->fs_optim
) == UFS_OPTSPACE
) {
461 if (uspi
->cs_total
.cs_nffree
< uspi
->s_space_to_time
)
462 usb1
->fs_optim
= cpu_to_fs32(sb
, UFS_OPTTIME
);
464 request
= uspi
->s_fpb
;
465 if (uspi
->cs_total
.cs_nffree
> uspi
->s_time_to_space
)
466 usb1
->fs_optim
= cpu_to_fs32(sb
, UFS_OPTSPACE
);
468 result
= ufs_alloc_fragments (inode
, cgno
, goal
, request
, err
);
470 ufs_clear_frags(inode
, result
+ oldcount
, newcount
- oldcount
,
471 locked_page
!= NULL
);
472 mutex_unlock(&UFS_SB(sb
)->s_lock
);
473 ufs_change_blocknr(inode
, fragment
- oldcount
, oldcount
,
474 uspi
->s_sbbase
+ tmp
,
475 uspi
->s_sbbase
+ result
, locked_page
);
477 write_seqlock(&UFS_I(inode
)->meta_lock
);
478 ufs_cpu_to_data_ptr(sb
, p
, result
);
479 UFS_I(inode
)->i_lastfrag
= max(UFS_I(inode
)->i_lastfrag
,
481 write_sequnlock(&UFS_I(inode
)->meta_lock
);
482 if (newcount
< request
)
483 ufs_free_fragments (inode
, result
+ newcount
, request
- newcount
);
484 ufs_free_fragments (inode
, tmp
, oldcount
);
485 UFSD("EXIT, result %llu\n", (unsigned long long)result
);
489 mutex_unlock(&UFS_SB(sb
)->s_lock
);
490 UFSD("EXIT (FAILED)\n");
494 static bool try_add_frags(struct inode
*inode
, unsigned frags
)
496 unsigned size
= frags
* i_blocksize(inode
);
497 spin_lock(&inode
->i_lock
);
498 __inode_add_bytes(inode
, size
);
499 if (unlikely((u32
)inode
->i_blocks
!= inode
->i_blocks
)) {
500 __inode_sub_bytes(inode
, size
);
501 spin_unlock(&inode
->i_lock
);
504 spin_unlock(&inode
->i_lock
);
508 static u64
ufs_add_fragments(struct inode
*inode
, u64 fragment
,
509 unsigned oldcount
, unsigned newcount
)
511 struct super_block
* sb
;
512 struct ufs_sb_private_info
* uspi
;
513 struct ufs_cg_private_info
* ucpi
;
514 struct ufs_cylinder_group
* ucg
;
515 unsigned cgno
, fragno
, fragoff
, count
, fragsize
, i
;
517 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
518 (unsigned long long)fragment
, oldcount
, newcount
);
521 uspi
= UFS_SB(sb
)->s_uspi
;
522 count
= newcount
- oldcount
;
524 cgno
= ufs_dtog(uspi
, fragment
);
525 if (fs32_to_cpu(sb
, UFS_SB(sb
)->fs_cs(cgno
).cs_nffree
) < count
)
527 if ((ufs_fragnum (fragment
) + newcount
) > uspi
->s_fpb
)
529 ucpi
= ufs_load_cylinder (sb
, cgno
);
532 ucg
= ubh_get_ucg (UCPI_UBH(ucpi
));
533 if (!ufs_cg_chkmagic(sb
, ucg
)) {
534 ufs_panic (sb
, "ufs_add_fragments",
535 "internal error, bad magic number on cg %u", cgno
);
539 fragno
= ufs_dtogd(uspi
, fragment
);
540 fragoff
= ufs_fragnum (fragno
);
541 for (i
= oldcount
; i
< newcount
; i
++)
542 if (ubh_isclr (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, fragno
+ i
))
545 if (!try_add_frags(inode
, count
))
548 * Block can be extended
550 ucg
->cg_time
= cpu_to_fs32(sb
, get_seconds());
551 for (i
= newcount
; i
< (uspi
->s_fpb
- fragoff
); i
++)
552 if (ubh_isclr (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, fragno
+ i
))
554 fragsize
= i
- oldcount
;
555 if (!fs32_to_cpu(sb
, ucg
->cg_frsum
[fragsize
]))
556 ufs_panic (sb
, "ufs_add_fragments",
557 "internal error or corrupted bitmap on cg %u", cgno
);
558 fs32_sub(sb
, &ucg
->cg_frsum
[fragsize
], 1);
559 if (fragsize
!= count
)
560 fs32_add(sb
, &ucg
->cg_frsum
[fragsize
- count
], 1);
561 for (i
= oldcount
; i
< newcount
; i
++)
562 ubh_clrbit (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, fragno
+ i
);
564 fs32_sub(sb
, &ucg
->cg_cs
.cs_nffree
, count
);
565 fs32_sub(sb
, &UFS_SB(sb
)->fs_cs(cgno
).cs_nffree
, count
);
566 uspi
->cs_total
.cs_nffree
-= count
;
568 ubh_mark_buffer_dirty (USPI_UBH(uspi
));
569 ubh_mark_buffer_dirty (UCPI_UBH(ucpi
));
570 if (sb
->s_flags
& SB_SYNCHRONOUS
)
571 ubh_sync_block(UCPI_UBH(ucpi
));
572 ufs_mark_sb_dirty(sb
);
574 UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment
);
/*
 * Jump to cg_found if cylinder group 'cgno' has a free block, or a free
 * fragment run of at least 'count' fragments.  Expects sb, uspi, ucg,
 * cgno, count and k in scope; used only inside ufs_alloc_fragments().
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
587 static u64
ufs_alloc_fragments(struct inode
*inode
, unsigned cgno
,
588 u64 goal
, unsigned count
, int *err
)
590 struct super_block
* sb
;
591 struct ufs_sb_private_info
* uspi
;
592 struct ufs_cg_private_info
* ucpi
;
593 struct ufs_cylinder_group
* ucg
;
594 unsigned oldcg
, i
, j
, k
, allocsize
;
597 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
598 inode
->i_ino
, cgno
, (unsigned long long)goal
, count
);
601 uspi
= UFS_SB(sb
)->s_uspi
;
605 * 1. searching on preferred cylinder group
607 UFS_TEST_FREE_SPACE_CG
610 * 2. quadratic rehash
612 for (j
= 1; j
< uspi
->s_ncg
; j
*= 2) {
614 if (cgno
>= uspi
->s_ncg
)
616 UFS_TEST_FREE_SPACE_CG
620 * 3. brute force search
621 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
623 cgno
= (oldcg
+ 1) % uspi
->s_ncg
;
624 for (j
= 2; j
< uspi
->s_ncg
; j
++) {
626 if (cgno
>= uspi
->s_ncg
)
628 UFS_TEST_FREE_SPACE_CG
631 UFSD("EXIT (FAILED)\n");
635 ucpi
= ufs_load_cylinder (sb
, cgno
);
638 ucg
= ubh_get_ucg (UCPI_UBH(ucpi
));
639 if (!ufs_cg_chkmagic(sb
, ucg
))
640 ufs_panic (sb
, "ufs_alloc_fragments",
641 "internal error, bad magic number on cg %u", cgno
);
642 ucg
->cg_time
= cpu_to_fs32(sb
, get_seconds());
644 if (count
== uspi
->s_fpb
) {
645 result
= ufs_alloccg_block (inode
, ucpi
, goal
, err
);
646 if (result
== INVBLOCK
)
651 for (allocsize
= count
; allocsize
< uspi
->s_fpb
; allocsize
++)
652 if (fs32_to_cpu(sb
, ucg
->cg_frsum
[allocsize
]) != 0)
655 if (allocsize
== uspi
->s_fpb
) {
656 result
= ufs_alloccg_block (inode
, ucpi
, goal
, err
);
657 if (result
== INVBLOCK
)
659 goal
= ufs_dtogd(uspi
, result
);
660 for (i
= count
; i
< uspi
->s_fpb
; i
++)
661 ubh_setbit (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, goal
+ i
);
662 i
= uspi
->s_fpb
- count
;
664 inode_sub_bytes(inode
, i
<< uspi
->s_fshift
);
665 fs32_add(sb
, &ucg
->cg_cs
.cs_nffree
, i
);
666 uspi
->cs_total
.cs_nffree
+= i
;
667 fs32_add(sb
, &UFS_SB(sb
)->fs_cs(cgno
).cs_nffree
, i
);
668 fs32_add(sb
, &ucg
->cg_frsum
[i
], 1);
672 result
= ufs_bitmap_search (sb
, ucpi
, goal
, allocsize
);
673 if (result
== INVBLOCK
)
675 if (!try_add_frags(inode
, count
))
677 for (i
= 0; i
< count
; i
++)
678 ubh_clrbit (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, result
+ i
);
680 fs32_sub(sb
, &ucg
->cg_cs
.cs_nffree
, count
);
681 uspi
->cs_total
.cs_nffree
-= count
;
682 fs32_sub(sb
, &UFS_SB(sb
)->fs_cs(cgno
).cs_nffree
, count
);
683 fs32_sub(sb
, &ucg
->cg_frsum
[allocsize
], 1);
685 if (count
!= allocsize
)
686 fs32_add(sb
, &ucg
->cg_frsum
[allocsize
- count
], 1);
689 ubh_mark_buffer_dirty (USPI_UBH(uspi
));
690 ubh_mark_buffer_dirty (UCPI_UBH(ucpi
));
691 if (sb
->s_flags
& SB_SYNCHRONOUS
)
692 ubh_sync_block(UCPI_UBH(ucpi
));
693 ufs_mark_sb_dirty(sb
);
695 result
+= cgno
* uspi
->s_fpg
;
696 UFSD("EXIT3, result %llu\n", (unsigned long long)result
);
700 static u64
ufs_alloccg_block(struct inode
*inode
,
701 struct ufs_cg_private_info
*ucpi
,
704 struct super_block
* sb
;
705 struct ufs_sb_private_info
* uspi
;
706 struct ufs_cylinder_group
* ucg
;
709 UFSD("ENTER, goal %llu\n", (unsigned long long)goal
);
712 uspi
= UFS_SB(sb
)->s_uspi
;
713 ucg
= ubh_get_ucg(UCPI_UBH(ucpi
));
716 goal
= ucpi
->c_rotor
;
719 goal
= ufs_blknum (goal
);
720 goal
= ufs_dtogd(uspi
, goal
);
723 * If the requested block is available, use it.
725 if (ubh_isblockset(UCPI_UBH(ucpi
), ucpi
->c_freeoff
, ufs_fragstoblks(goal
))) {
731 result
= ufs_bitmap_search (sb
, ucpi
, goal
, uspi
->s_fpb
);
732 if (result
== INVBLOCK
)
734 ucpi
->c_rotor
= result
;
736 if (!try_add_frags(inode
, uspi
->s_fpb
))
738 blkno
= ufs_fragstoblks(result
);
739 ubh_clrblock (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, blkno
);
740 if ((UFS_SB(sb
)->s_flags
& UFS_CG_MASK
) == UFS_CG_44BSD
)
741 ufs_clusteracct (sb
, ucpi
, blkno
, -1);
743 fs32_sub(sb
, &ucg
->cg_cs
.cs_nbfree
, 1);
744 uspi
->cs_total
.cs_nbfree
--;
745 fs32_sub(sb
, &UFS_SB(sb
)->fs_cs(ucpi
->c_cgx
).cs_nbfree
, 1);
747 if (uspi
->fs_magic
!= UFS2_MAGIC
) {
748 unsigned cylno
= ufs_cbtocylno((unsigned)result
);
750 fs16_sub(sb
, &ubh_cg_blks(ucpi
, cylno
,
751 ufs_cbtorpos((unsigned)result
)), 1);
752 fs32_sub(sb
, &ubh_cg_blktot(ucpi
, cylno
), 1);
755 UFSD("EXIT, result %llu\n", (unsigned long long)result
);
760 static unsigned ubh_scanc(struct ufs_sb_private_info
*uspi
,
761 struct ufs_buffer_head
*ubh
,
762 unsigned begin
, unsigned size
,
763 unsigned char *table
, unsigned char mask
)
765 unsigned rest
, offset
;
769 offset
= begin
& ~uspi
->s_fmask
;
770 begin
>>= uspi
->s_fshift
;
772 if ((offset
+ size
) < uspi
->s_fsize
)
775 rest
= uspi
->s_fsize
- offset
;
777 cp
= ubh
->bh
[begin
]->b_data
+ offset
;
778 while ((table
[*cp
++] & mask
) == 0 && --rest
)
785 return (size
+ rest
);
789 * Find a block of the specified size in the specified cylinder group.
790 * @sp: pointer to super block
791 * @ucpi: pointer to cylinder group info
792 * @goal: near which block we want find new one
793 * @count: specified size
795 static u64
ufs_bitmap_search(struct super_block
*sb
,
796 struct ufs_cg_private_info
*ucpi
,
797 u64 goal
, unsigned count
)
800 * Bit patterns for identifying fragments in the block map
801 * used as ((map & mask_arr) == want_arr)
803 static const int mask_arr
[9] = {
804 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
806 static const int want_arr
[9] = {
807 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
809 struct ufs_sb_private_info
*uspi
= UFS_SB(sb
)->s_uspi
;
810 unsigned start
, length
, loc
;
811 unsigned pos
, want
, blockmap
, mask
, end
;
814 UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi
->c_cgx
,
815 (unsigned long long)goal
, count
);
818 start
= ufs_dtogd(uspi
, goal
) >> 3;
820 start
= ucpi
->c_frotor
>> 3;
822 length
= ((uspi
->s_fpg
+ 7) >> 3) - start
;
823 loc
= ubh_scanc(uspi
, UCPI_UBH(ucpi
), ucpi
->c_freeoff
+ start
, length
,
824 (uspi
->s_fpb
== 8) ? ufs_fragtable_8fpb
: ufs_fragtable_other
,
825 1 << (count
- 1 + (uspi
->s_fpb
& 7)));
828 loc
= ubh_scanc(uspi
, UCPI_UBH(ucpi
), ucpi
->c_freeoff
, length
,
829 (uspi
->s_fpb
== 8) ? ufs_fragtable_8fpb
:
831 1 << (count
- 1 + (uspi
->s_fpb
& 7)));
833 ufs_error(sb
, "ufs_bitmap_search",
834 "bitmap corrupted on cg %u, start %u,"
835 " length %u, count %u, freeoff %u\n",
836 ucpi
->c_cgx
, start
, length
, count
,
842 result
= (start
+ length
- loc
) << 3;
843 ucpi
->c_frotor
= result
;
846 * found the byte in the map
849 for (end
= result
+ 8; result
< end
; result
+= uspi
->s_fpb
) {
850 blockmap
= ubh_blkmap(UCPI_UBH(ucpi
), ucpi
->c_freeoff
, result
);
852 mask
= mask_arr
[count
];
853 want
= want_arr
[count
];
854 for (pos
= 0; pos
<= uspi
->s_fpb
- count
; pos
++) {
855 if ((blockmap
& mask
) == want
) {
856 UFSD("EXIT, result %llu\n",
857 (unsigned long long)result
);
865 ufs_error(sb
, "ufs_bitmap_search", "block not in map on cg %u\n",
867 UFSD("EXIT (FAILED)\n");
871 static void ufs_clusteracct(struct super_block
* sb
,
872 struct ufs_cg_private_info
* ucpi
, unsigned blkno
, int cnt
)
874 struct ufs_sb_private_info
* uspi
;
875 int i
, start
, end
, forw
, back
;
877 uspi
= UFS_SB(sb
)->s_uspi
;
878 if (uspi
->s_contigsumsize
<= 0)
882 ubh_setbit(UCPI_UBH(ucpi
), ucpi
->c_clusteroff
, blkno
);
884 ubh_clrbit(UCPI_UBH(ucpi
), ucpi
->c_clusteroff
, blkno
);
887 * Find the size of the cluster going forward.
890 end
= start
+ uspi
->s_contigsumsize
;
891 if ( end
>= ucpi
->c_nclusterblks
)
892 end
= ucpi
->c_nclusterblks
;
893 i
= ubh_find_next_zero_bit (UCPI_UBH(ucpi
), ucpi
->c_clusteroff
, end
, start
);
899 * Find the size of the cluster going backward.
902 end
= start
- uspi
->s_contigsumsize
;
905 i
= ubh_find_last_zero_bit (UCPI_UBH(ucpi
), ucpi
->c_clusteroff
, start
, end
);
911 * Account for old cluster and the possibly new forward and
915 if (i
> uspi
->s_contigsumsize
)
916 i
= uspi
->s_contigsumsize
;
917 fs32_add(sb
, (__fs32
*)ubh_get_addr(UCPI_UBH(ucpi
), ucpi
->c_clustersumoff
+ (i
<< 2)), cnt
);
919 fs32_sub(sb
, (__fs32
*)ubh_get_addr(UCPI_UBH(ucpi
), ucpi
->c_clustersumoff
+ (back
<< 2)), cnt
);
921 fs32_sub(sb
, (__fs32
*)ubh_get_addr(UCPI_UBH(ucpi
), ucpi
->c_clustersumoff
+ (forw
<< 2)), cnt
);
/*
 * Lookup table for 8-fragments-per-block filesystems: maps a free-map
 * byte to a bit set describing the free-run lengths it contains
 * (bit k-1 set => a free run of length k exists in that byte).
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
/*
 * Lookup table for filesystems with other than 8 fragments per block:
 * same role as ufs_fragtable_8fpb, with bits shifted per the
 * (uspi->s_fpb & 7) adjustment applied by ufs_bitmap_search().
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};