/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the free fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Modify inode page cache in such way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get - blocks with b_blocknr equal to newb...newb+count-1
 * also we assume that blocks oldb...oldb+count-1 are situated
 * at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of these functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
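/*
 * Zero freshly allocated fragments in the buffer cache so that stale disk
 * contents never become visible to readers; the buffers are written out
 * immediately when the inode or the caller requires synchronous operation.
 */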
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
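/*
 * ufs_new_fragments() handles three cases: a brand new allocation
 * (oldcount == 0), growing an existing partial block in place via
 * ufs_add_fragments(), and, failing that, allocating a bigger run
 * elsewhere and relocating the cached data with ufs_change_blocknr().
 */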
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_cpu_to_data_ptr(sb, p, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		ufs_cpu_to_data_ptr(sb, p, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
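/*
 * Check whether cylinder group 'cgno' can satisfy the request: either a
 * whole free block, or a free fragment run of at least 'count' fragments
 * (cg_frsum[k] counts the free runs of size k). Jumps to the cg_found
 * label expanded inside ufs_alloc_fragments() below.
 */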
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (cg 0 is checked in step 1, cg 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		/*
		 * No suitable fragment run is free: carve the fragments
		 * out of a whole block and return the remainder to the
		 * free-fragment counts.
		 */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
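/*
 * Allocate one whole block in the given cylinder group, preferably at
 * 'goal'; otherwise search starting from the per-group rotor, so that
 * successive allocations walk the free-block bitmap instead of piling
 * up at its start.
 */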
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
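/*
 * Scan 'size' bytes of the free-fragment bitmap starting at 'begin',
 * testing each byte against a lookup table for a suitable run of free
 * fragments. Returns the number of bytes left unscanned from the match
 * onwards, or 0 if no match was found. The bitmap may span several
 * buffer heads, hence the ufs_buffer_head indexing.
 */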
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
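/*
 * Fragment-run lookup tables used by ufs_bitmap_search()/ubh_scanc(),
 * in the style of the BSD FFS fragtbl: each entry maps one byte of the
 * free-fragment bitmap to a set of flag bits describing the free-run
 * lengths present in that byte, so that
 * entry & (1 << (count - 1 + (s_fpb & 7))) asks whether the byte holds
 * a run of 'count' free fragments. One table covers the
 * 8-fragments-per-block layout, the other the remaining layouts.
 */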
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};