/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)
static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
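
/*
 * UFS tracks free space per cylinder group: a free-fragment bitmap
 * (at ucpi->c_freeoff), counters of free runs of each length
 * (ucg->cg_frsum[]) and summary counters (ucg->cg_cs).  The helpers
 * below must keep these consistent with the filesystem-wide totals in
 * uspi->cs_total and the per-group copies reached via fs_cs().
 */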
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the free fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
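
/*
 * Note that a range freed here may extend past the end of its cylinder
 * group: the do_more loop below frees what fits into the current group
 * and retries with the overflow in the following one.
 */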
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Modify the inode page cache in the following way:
 * we have blocks with b_blocknr equal to oldb...oldb+count-1 and we
 * want blocks with b_blocknr equal to newb...newb+count-1; we also
 * assume that the blocks oldb...oldb+count-1 are situated at the end
 * of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of these functions, so it is already
 * locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
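
/*
 * Zero freshly allocated fragments in the buffer cache so stale disk
 * contents cannot leak into the file; with 'sync' set (or on an
 * IS_SYNC inode) the zeroed buffers are written out immediately.
 */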
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
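
/*
 * ufs_new_fragments() distinguishes three cases:
 *  1) oldcount == 0: allocate a fresh run of 'count' fragments;
 *  2) the run can be extended in place via ufs_add_fragments();
 *  3) otherwise allocate a bigger run elsewhere, move the cached
 *     data over with ufs_change_blocknr() and free the old fragments.
 */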
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			unlock_super(sb);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_cpu_to_data_ptr(sb, p, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max_t(u32, UFS_I(inode)->i_lastfrag,
				      fragment + count);
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
		}
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		ufs_cpu_to_data_ptr(sb, p, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
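
/*
 * Extending in place consumes part of the free run that follows the
 * fragment: if that run had length 'fragsize' and 'count' fragments
 * are taken from it, cg_frsum[fragsize] is decremented and, unless the
 * run was used up exactly, cg_frsum[fragsize - count] is incremented
 * (e.g. taking 2 fragments out of a free run of 5 leaves a run of 3).
 */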
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
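
/*
 * Used by ufs_alloc_fragments() below: jump to cg_found as soon as the
 * candidate cylinder group has either a whole free block or a free
 * fragment run of at least 'count' fragments (per cg_frsum[]).
 */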
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
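
/*
 * Allocate one whole block in the given cylinder group.  The caller's
 * goal block is used if it is still free; otherwise the search falls
 * back to the per-group rotor (c_rotor), which remembers where the
 * last block allocation in this group succeeded.
 */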
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
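
/*
 * Maintain the cluster summary table used by the 44BSD layout:
 * flipping a block's bit in the cluster map merges it with (or splits
 * it from) the free clusters immediately before ('back') and after
 * ('forw') it, so the summary counts for the old cluster sizes and for
 * the combined size are adjusted by 'cnt' accordingly.
 */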
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
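
/*
 * Each entry below is a bitmask describing which free-run lengths
 * occur in the 8-bit fragment map used as the index: bit (n - 1) is
 * set when the map contains a run of exactly n free fragments, e.g.
 * ufs_fragtable_8fpb[0x0f] == 0x08, a single run of four.  The second
 * table serves filesystems with fewer than 8 fragments per block; its
 * entries pack the run information of the sub-maps at the bit offsets
 * selected by the (uspi->s_fpb & 7) shift in ufs_bitmap_search().
 */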
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};