// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, OGAWA Hirofumi
 */

#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev-defs.h>
#include "fat.h"
struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};
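
/*
 * A FAT12 entry is 12 bits wide, so neighbouring entries share bytes and
 * updating one entry read-modify-writes bytes that may also belong to the
 * adjacent entry; fat12_entry_lock below serializes those accesses.
 */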
static DEFINE_SPINLOCK(fat12_entry_lock);
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = entry + (entry >> 1);

	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
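
/*
 * Example: a FAT12 entry occupies 1.5 bytes, so entry 5 starts at byte
 * 5 + (5 >> 1) = 7 of the FAT; with a 512-byte block size that is
 * offset 7 in block fat_start + 0.
 */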
static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = (entry << sbi->fatent_shift);

	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
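
/*
 * Here fatent_shift is 1 for FAT16 (2-byte entries) and 2 for FAT32
 * (4-byte entries).  E.g. FAT32 entry 300 lives at byte 300 << 2 = 1200,
 * i.e. block fat_start + 2, offset 176, for a 512-byte block size.
 */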
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;

	if (fatent->nr_bhs == 1) {
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}
static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (2 - 1));
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}
static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (4 - 1));
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry straddles a block boundary; it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			  (llu)blocknr);
	return -EIO;
}
static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
				  (llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}
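
/*
 * Worked example: an even entry stored as p0 = 0x34, p1 = 0x12 decodes
 * to ((0x12 << 8) | 0x34) & 0xfff = 0x234; an odd entry with p0 = 0xab,
 * p1 = 0xcd decodes to (0xab >> 4) | (0xcd << 4) = 0xcda.
 */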
static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);

	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}
static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;

	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}
static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;

	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
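
/*
 * FAT32 entries are really 28 bits; the top nibble is reserved.  The OR
 * with the old value above preserves those reserved bits while replacing
 * the low 28 bits.
 */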
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}
static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];

	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}
static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];

	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}
static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};
static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	if (is_fat32(sbi)) {
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
	} else if (is_fat16(sbi)) {
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
	} else if (is_fat12(sbi)) {
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
	} else {
		fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
	}
}
static void mark_fsinfo_dirty(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (sb_rdonly(sb) || !is_fat32(sbi))
		return;

	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Do this fatent's blocks include this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (is_fat12(sbi)) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}
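
/*
 * In short: this returns 1 when the buffer head(s) already cached in
 * @fatent still map the requested entry, letting sequential chain walks
 * reuse them instead of calling ->ent_bread() again.
 */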
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (!fat_valid_entry(sbi, entry)) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}
/* FIXME: We could write the blocks in bigger chunks. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* Avoid race with userspace read via bdev */
			lock_buffer(c_bh);
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			unlock_buffer(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}
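
/*
 * Illustrative call pattern (a sketch, not a verbatim caller): chain
 * updates read an entry and then write through the same fat_entry so
 * that the cached buffer heads are reused:
 *
 *	struct fat_entry fatent;
 *
 *	fatent_init(&fatent);
 *	err = fat_ent_read(inode, &fatent, entry);
 *	if (err >= 0)
 *		err = fat_ent_write(inode, &fatent, FAT_ENT_EOF, wait);
 *	fatent_brelse(&fatent);
 */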
static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}
static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}
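
/*
 * In short: deduplicate the fatent's buffer heads into the caller's
 * array, taking an extra reference via get_bh() on each newly added one
 * so it remains valid after fatent_brelse() drops the fatent's own
 * references.
 */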
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() gets ref-count of bhs,
				 * so we can still use the prev_ent.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
struct fatent_ra {
	sector_t cur;
	sector_t limit;

	unsigned int ra_blocks;
	sector_t ra_advance;
	sector_t ra_next;
	sector_t ra_limit;
};
static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
			struct fat_entry *fatent, int ent_limit)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	sector_t blocknr, block_end;
	int offset;
	/*
	 * This is a sequential read, so use ra_pages * 2 (but try to
	 * align with the optimal hardware IO size).
	 * [BTW, 128kb covers the whole sectors for FAT12 and FAT16]
	 */
	unsigned long ra_pages = sb->s_bdi->ra_pages;
	unsigned int reada_blocks;

	if (fatent->entry >= ent_limit)
		return;

	if (ra_pages > sb->s_bdi->io_pages)
		ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
	reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);

	/* Initialize the range for sequential read */
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
	ra->cur = 0;
	ra->limit = (block_end + 1) - blocknr;

	/* Advance the window by half its size */
	ra->ra_blocks = reada_blocks >> 1;
	ra->ra_advance = ra->cur;
	ra->ra_next = ra->cur;
	ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
}
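
/*
 * Example numbers: with ra_pages = 32, PAGE_SHIFT = 12 and a 512-byte
 * block size, reada_blocks = 32 << (12 - 9 + 1) = 512 blocks (256k),
 * i.e. twice the 128k readahead setting, and the window then advances
 * in halves of 256 blocks.
 */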
/* Assumes this is called before reading a new block (it increments ->cur). */
static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
			  struct fat_entry *fatent)
{
	if (ra->ra_next >= ra->ra_limit)
		return;

	if (ra->cur >= ra->ra_advance) {
		struct msdos_sb_info *sbi = MSDOS_SB(sb);
		const struct fatent_operations *ops = sbi->fatent_ops;
		struct blk_plug plug;
		sector_t blocknr, diff;
		int offset;

		ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

		diff = blocknr - ra->cur;
		blk_start_plug(&plug);
		/*
		 * FIXME: we would want to directly use the bio with
		 * pages to reduce the number of segments.
		 */
		for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
			sb_breadahead(sb, ra->ra_next + diff);
		blk_finish_plug(&plug);

		/* Advance the readahead window */
		ra->ra_advance += ra->ra_blocks;
		ra->ra_limit += min_t(sector_t,
				      ra->ra_blocks, ra->limit - ra->ra_limit);
	}
	ra->cur++;
}
int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
		cond_resched();
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}
static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}
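
/*
 * fat_clus_to_blknr() maps a cluster index to its first sector, and
 * nr_clus * sec_per_clus converts the cluster count to sectors, so the
 * discard covers exactly the clusters' data area.
 */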
int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	int err = 0;

	/*
	 * FAT data is organized as clusters, so trim at the granularity
	 * of a cluster.
	 *
	 * fstrim_range is in bytes; convert the values to cluster indexes.
	 * Treat sectors before the data region as all used, so as not to
	 * trim them.
	 */
	ent_start = max_t(u64, range->start >> sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
			} else if (free) {
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* handle the case where the tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}

error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);

	range->len = trimmed << sbi->cluster_bits;

	return err;
}