/*
 * Copyright (C) 2004, OGAWA Hirofumi
 * Released under GPL v2.
 */

#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include "fat.h"

struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};

static DEFINE_SPINLOCK(fat12_entry_lock);

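/*
 * Translate a FAT entry number into the block that holds it and the
 * byte offset within that block.  A FAT12 entry is 12 bits (1.5 bytes),
 * so entry N starts at byte N + N/2 and may straddle a block boundary.
 */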
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = entry + (entry >> 1);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = (entry << sbi->fatent_shift);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

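/*
 * Point fatent->u.ent12_p[0..1] at the two bytes backing this 12-bit
 * entry.  If the entry straddles a block boundary, the second byte is
 * the first byte of bhs[1].
 */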
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;
	if (fatent->nr_bhs == 1) {
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (2 - 1));
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (4 - 1));
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}

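/*
 * Read the block containing a FAT12 entry; an entry whose second byte
 * falls into the following block needs both blocks.
 */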
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry is block boundary, it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
	return -EIO;
}

static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
		       (llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}

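/*
 * Decode a 12-bit entry: an even entry is all of byte 0 plus the low
 * nibble of byte 1, an odd entry is the high nibble of byte 0 plus all
 * of byte 1.  fat12_entry_lock serializes access to the byte shared
 * with the neighbouring entry.
 */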
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);
	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}

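/*
 * Store a 12-bit entry, preserving the nibble owned by the
 * neighbouring entry, and dirty the buffer(s) backing it.
 */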
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;

	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

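/*
 * Step the cached pointers to the next entry in the block(s) already
 * read.  Returns 1 on success, 0 when the caller has to read the next
 * FAT block first.
 */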
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}

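/*
 * Per-variant accessors.  FAT16 and FAT32 share the generic blocknr
 * and bread helpers; FAT12's unaligned entries need dedicated ones.
 */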
static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};

static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}

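/*
 * Pick the accessor table and entry-size shift for this volume.
 * fatent_shift is log2 of the entry size in bytes; FAT12 entries are
 * 1.5 bytes, so it uses -1 and its own ent_blocknr().
 */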
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	switch (sbi->fat_bits) {
	case 32:
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
		break;
	case 16:
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
		break;
	case 12:
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
		break;
	}
}

static void mark_fsinfo_dirty(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (sb_rdonly(sb) || sbi->fat_bits != 32)
		return;

	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}

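/*
 * Try to reposition fatent's cached pointers for the entry at
 * (blocknr, offset) without re-reading.  Returns 1 on success, 0 if
 * the caller must read the block(s).
 */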
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Do this fatent's cached blocks include this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (sbi->fat_bits == 12) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}

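/*
 * Read the FAT entry and return its value: the next cluster in the
 * chain, FAT_ENT_FREE/FAT_ENT_EOF, or a negative error.
 */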
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (!fat_valid_entry(sbi, entry)) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}

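/*
 * Propagate dirtied FAT blocks to every backup FAT; copy i of the FAT
 * starts fat_length * i blocks after the primary one.
 */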
/* FIXME: We could write the blocks as one bigger chunk. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* Avoid race with userspace read via bdev */
			lock_buffer(c_bh);
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			unlock_buffer(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}

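/*
 * Store 'new' into the entry, sync the buffers if 'wait' is set, then
 * mirror the change to the backup FAT(s).
 */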
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}

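/*
 * Merge fatent's buffers into bhs[], taking a reference on each buffer
 * not already collected so it outlives fatent's next move.
 */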
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}

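/*
 * Allocate nr_cluster clusters, chaining them as they are found and
 * scanning from the sbi->prev_free hint.  Cluster numbers are returned
 * in cluster[]; a partially built chain is freed on failure.
 */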
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() gets ref-count of bhs,
				 * so we can still use the prev_ent.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}

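/*
 * Walk the chain from 'cluster' to EOF, freeing each entry, issuing
 * discards if enabled, and flushing dirtied FAT blocks in
 * MAX_BUF_PER_PAGE-sized batches.
 */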
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);
				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);

/* 128kb covers all the FAT sectors for FAT12 and FAT16 */
#define FAT_READA_SIZE		(128 * 1024)

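/* Kick off readahead of reada_blocks FAT blocks, starting at fatent's block. */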
static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
			  unsigned long reada_blocks)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int i, offset;

	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

	for (i = 0; i < reada_blocks; i++)
		sb_breadahead(sb, blocknr + i);
}

int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	unsigned long reada_blocks, reada_mask, cur_block;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;
	cur_block = 0;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		if ((cur_block & reada_mask) == 0) {
			unsigned long rest = sbi->fat_length - cur_block;
			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
		}
		cur_block++;

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
		cond_resched();
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}

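/* Discard (trim) the run of nr_clus clusters starting at cluster 'clus'. */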
static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}

int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	unsigned long reada_blocks, reada_mask, cur_block = 0;
	int err = 0;

	/*
	 * FAT data is organized as clusters, trim at the granularity of
	 * cluster.
	 *
	 * fstrim_range is in bytes, convert values to cluster index.
	 * Treat sectors before the data region as all used, not to trim them.
	 */
	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		if ((cur_block & reada_mask) == 0) {
			unsigned long rest = sbi->fat_length - cur_block;
			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
		}
		cur_block++;

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
			} else if (free) {
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* handle scenario when tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}

error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);

	range->len = trimmed << sbi->cluster_bits;

	return err;
}