/*
 *  linux/fs/ufs/balloc.c
 *
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)
static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into a block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Modify the inode page cache in the following way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get  - blocks with b_blocknr equal to newb...newb+count-1
 * We also assume that the oldb...oldb+count-1 blocks are
 * situated at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of these functions, so it is already locked.
 */
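/*
 * Note that beg and end below are file-relative block indices; the outer
 * loop steps with i = (i | mask) + 1, i.e. it jumps to the first block of
 * the next page, so each affected page is visited exactly once.
 */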
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			write_sequnlock(&UFS_I(inode)->meta_lock);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
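
/*
 * The macro expects sb, uspi, cgno, count and k to be in scope and jumps to
 * the cg_found label in ufs_alloc_fragments() below when cylinder group
 * cgno has either a whole free block or a free run of at least count
 * fragments.
 */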
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (0 is checked in step 1, 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
*uspi
= UFS_SB(sb
)->s_uspi
;
815 unsigned start
, length
, loc
;
816 unsigned pos
, want
, blockmap
, mask
, end
;
819 UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi
->c_cgx
,
820 (unsigned long long)goal
, count
);
823 start
= ufs_dtogd(uspi
, goal
) >> 3;
825 start
= ucpi
->c_frotor
>> 3;
827 length
= ((uspi
->s_fpg
+ 7) >> 3) - start
;
828 loc
= ubh_scanc(uspi
, UCPI_UBH(ucpi
), ucpi
->c_freeoff
+ start
, length
,
829 (uspi
->s_fpb
== 8) ? ufs_fragtable_8fpb
: ufs_fragtable_other
,
830 1 << (count
- 1 + (uspi
->s_fpb
& 7)));
833 loc
= ubh_scanc(uspi
, UCPI_UBH(ucpi
), ucpi
->c_freeoff
, length
,
834 (uspi
->s_fpb
== 8) ? ufs_fragtable_8fpb
:
836 1 << (count
- 1 + (uspi
->s_fpb
& 7)));
838 ufs_error(sb
, "ufs_bitmap_search",
839 "bitmap corrupted on cg %u, start %u,"
840 " length %u, count %u, freeoff %u\n",
841 ucpi
->c_cgx
, start
, length
, count
,
847 result
= (start
+ length
- loc
) << 3;
848 ucpi
->c_frotor
= result
;
851 * found the byte in the map
854 for (end
= result
+ 8; result
< end
; result
+= uspi
->s_fpb
) {
855 blockmap
= ubh_blkmap(UCPI_UBH(ucpi
), ucpi
->c_freeoff
, result
);
857 mask
= mask_arr
[count
];
858 want
= want_arr
[count
];
859 for (pos
= 0; pos
<= uspi
->s_fpb
- count
; pos
++) {
860 if ((blockmap
& mask
) == want
) {
861 UFSD("EXIT, result %llu\n",
862 (unsigned long long)result
);
870 ufs_error(sb
, "ufs_bitmap_search", "block not in map on cg %u\n",
872 UFSD("EXIT (FAILED)\n");
static void ufs_clusteracct(struct super_block * sb,
			    struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
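
/*
 * ufs_fragtable_8fpb[b] has bit (n - 1) set when bitmap byte b (one bit per
 * fragment, set = free) contains a maximal run of exactly n free fragments;
 * ubh_scanc() masks it with 1 << (count - 1) to locate a byte holding a run
 * of the wanted size.  ufs_fragtable_other[] plays the same role when there
 * are fewer than eight fragments per block, with the lookup mask shifted by
 * (s_fpb & 7) in ufs_bitmap_search().
 */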
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};