/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)
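
/*
 * INVBLOCK marks a failed search in the block-level helpers
 * (ufs_alloccg_block, ufs_bitmap_search); the fragment-level
 * allocators signal failure by returning 0 instead.
 */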

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
50 UFSD("ENTER, fragment %llu, count %u\n",
51 (unsigned long long)fragment
, count
);
53 if (ufs_fragnum(fragment
) + count
> uspi
->s_fpg
)
54 ufs_error (sb
, "ufs_free_fragments", "internal error");
56 mutex_lock(&UFS_SB(sb
)->s_lock
);
58 cgno
= ufs_dtog(uspi
, fragment
);
59 bit
= ufs_dtogd(uspi
, fragment
);
60 if (cgno
>= uspi
->s_ncg
) {
61 ufs_panic (sb
, "ufs_free_fragments", "freeing blocks are outside device");
65 ucpi
= ufs_load_cylinder (sb
, cgno
);
68 ucg
= ubh_get_ucg (UCPI_UBH(ucpi
));
69 if (!ufs_cg_chkmagic(sb
, ucg
)) {
70 ufs_panic (sb
, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno
);

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
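
	/*
	 * The two ufs_fragacct() calls above bracket the bitmap update:
	 * the old free-fragment pattern of this block is subtracted from
	 * cg_frsum before the bits are set, and the new pattern is added
	 * back afterwards.
	 */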

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}
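
	/*
	 * A range that crossed the cylinder-group boundary was clipped
	 * above; the clipped-off tail is freed by another do_more pass.
	 */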
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that blocks which had b_blocknr equal to
 * oldb...oldb+count-1 now have b_blocknr equal to newb...newb+count-1.
 * We also assume that oldb...oldb+count-1 sit at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write; locked_page
 * is an argument of these functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;
250 UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
252 (unsigned long long)oldb
, (unsigned long long)newb
);
254 BUG_ON(!locked_page
);
255 BUG_ON(!PageLocked(locked_page
));
257 cur_index
= locked_page
->index
;
259 last_index
= end
>> (PAGE_SHIFT
- inode
->i_blkbits
);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page) /* it was truncated */
				continue;
			if (IS_ERR(page)) { /* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
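
/*
 * Zero 'n' freshly allocated fragments starting at 'beg' through the
 * buffer cache so that stale on-disk contents never become visible;
 * the buffers are written out immediately for synchronous inodes.
 */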
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
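
/*
 * Allocate 'count' fragments for the data pointer 'p', extending the
 * partial block at 'fragment' in place when possible, otherwise
 * allocating a fresh run (near 'goal' if one is given) and relocating
 * the old contents.  Returns the first fragment number, or 0/INVBLOCK
 * on failure.
 */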
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;
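
	/*
	 * oldcount is how many fragments of the tail block are already
	 * allocated; newcount is its size after this extension.
	 */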

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space left on the device for an
	 * unprivileged user
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}
	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			write_sequnlock(&UFS_I(inode)->meta_lock);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}
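
	/*
	 * In-place extension failed, so a whole new run must be allocated
	 * and the old fragments copied over.  fs_optim chooses the request
	 * size: UFS_OPTSPACE asks for exactly newcount fragments to keep
	 * fragmentation low, UFS_OPTTIME asks for a full block to save
	 * future copying.
	 */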
	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}

	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
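
/*
 * Check one cylinder group for enough space: jump to cg_found if the
 * group has a free block, or a free run of at least 'count' fragments
 * according to its cg_frsum summary.
 */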
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (offset 0 was checked in step 1, offset 1
	 * in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}
= ufs_bitmap_search (sb
, ucpi
, goal
, allocsize
);
658 if (result
== INVBLOCK
)
660 for (i
= 0; i
< count
; i
++)
661 ubh_clrbit (UCPI_UBH(ucpi
), ucpi
->c_freeoff
, result
+ i
);
663 fs32_sub(sb
, &ucg
->cg_cs
.cs_nffree
, count
);
664 uspi
->cs_total
.cs_nffree
-= count
;
665 fs32_sub(sb
, &UFS_SB(sb
)->fs_cs(cgno
).cs_nffree
, count
);
666 fs32_sub(sb
, &ucg
->cg_frsum
[allocsize
], 1);
668 if (count
!= allocsize
)
669 fs32_add(sb
, &ucg
->cg_frsum
[allocsize
- count
], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
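
/*
 * Allocate one whole block in the given cylinder group, at 'goal' if
 * that block is still free, otherwise near the group's rotor.  Returns
 * the first fragment of the block, or INVBLOCK when the group is full.
 */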
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;

gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
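
/*
 * Scan 'size' bytes of a cylinder-group bitmap, starting 'begin' bytes
 * into the ufs_buffer_head, for a byte whose table entry matches 'mask'.
 * Returns the number of bytes left unscanned when a match was found, or
 * 0 if no byte matched (the equivalent of the BSD scanc() routine).
 */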
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
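	/*
	 * For count == 3, for example, mask_arr[3] = 0x1f and
	 * want_arr[3] = 0xe (binary 01110): three free fragments (1 bits)
	 * fenced by allocated ones, so the run is exactly three long.
	 */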
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}

	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
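
/*
 * For every possible bitmap byte the fragment tables record which run
 * lengths of free (1) bits occur in it: bit n-1 of an entry is set when
 * the byte contains a run of exactly n free fragments.  ubh_scanc()
 * masks these entries to find a byte holding a big-enough run (for
 * fragments-per-block values below 8 the encoding is shifted by
 * s_fpb & 7, hence the second table).
 */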
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};