// SPDX-License-Identifier: GPL-2.0
/*
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/slab.h>
#include "fat.h"

/* this must be > 0. */
#define FAT_MAX_CACHE	8

struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

static void init_once(void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

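/*
 * Find the cached entry nearest to (at or before) file cluster "fclus" on
 * the inode's LRU list.  On a hit, move it to the front of the LRU, copy it
 * into *cid, advance *cached_fclus/*cached_dclus as far along the cached
 * contiguous run as possible, and return the offset walked; a negative
 * return means no usable entry was found.
 */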
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}

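/*
 * If an entry with the same starting file cluster as "new" already exists,
 * extend its contiguous run to cover "new" and return it; otherwise return
 * NULL.  Called with cache_lru_lock held.
 */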
static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

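/*
 * Install "new" into the inode's cluster cache: merge it with an existing
 * entry when possible, otherwise use a freshly allocated entry (up to
 * fat_max_cache()) or recycle the least recently used one, then move the
 * result to the front of the LRU.  An id built against a stale
 * cache_valid_id is silently dropped.
 */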
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;

			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

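/*
 * Drop every cached entry of the inode and bump cache_valid_id so that
 * lookups still in flight cannot re-insert stale entries.
 */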
void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

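/*
 * Helpers for building a fat_cache_id while walking the chain:
 * cache_contiguous() counts one more cluster and reports whether "dclus"
 * continues the current contiguous run; cache_init() restarts the id at
 * the given file/disk cluster pair.
 */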
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

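/*
 * Map file cluster "cluster" of "inode" to its cluster number on disk.
 * Starts from the nearest cached position (or the inode's start cluster),
 * follows the FAT chain entry by entry, and caches the contiguous run it
 * discovers.  Returns 0 when the target was reached, FAT_ENT_EOF if the
 * chain ends first, or a negative errno; *fclus/*dclus hold the last
 * file/disk cluster reached.
 */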
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (!fat_valid_entry(sbi, *dclus)) {
		fat_fs_error_ratelimit(sb,
			"%s: invalid start cluster (i_pos %lld, start %08x)",
			__func__, MSDOS_I(inode)->i_pos, *dclus);
		return -EIO;
	}
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
				"%s: detected the cluster chain loop (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_error_ratelimit(sb,
				"%s: invalid cluster chain (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}

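/*
 * Resolve file cluster "cluster" to its disk cluster number, or 0 if the
 * inode has no data clusters yet.  A chain that ends before "cluster" is
 * reported as a filesystem error.
 */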
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
			     __func__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}

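/*
 * Translate "sector" into an absolute block number in *bmap and report in
 * *mapped_blocks how many sectors from there stay within the same cluster
 * (clamped to last_block).
 */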
int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
			   sector_t last_block,
			   unsigned long *mapped_blocks, sector_t *bmap)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int cluster, offset;

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset  = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*bmap = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}

	return 0;
}

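/*
 * Returns nonzero when "sector" lies beyond the inode's data: past i_size,
 * or, on the create path, past the allocated area tracked in ->mmu_private.
 */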
static int is_exceed_eof(struct inode *inode, sector_t sector,
			 sector_t *last_block, int create)
{
	struct super_block *sb = inode->i_sb;
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;

	*last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= *last_block) {
		if (!create)
			return 1;

		/*
		 * ->mmu_private can be accessed only on the allocation path.
		 * (caller must hold ->i_mutex)
		 */
		*last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= *last_block)
			return 1;
	}

	return 0;
}

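/*
 * Map "sector" of the inode to a physical block in *phys and the number of
 * contiguously mapped blocks in *mapped_blocks; *phys == 0 means the sector
 * is not mapped.  The FAT12/16 root directory is mapped directly from
 * sbi->dir_start, since it lives outside the cluster area.
 */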
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create, bool from_bmap)
{
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	sector_t last_block;

	*phys = 0;
	*mapped_blocks = 0;
	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	if (!from_bmap) {
		if (is_exceed_eof(inode, sector, &last_block, create))
			return 0;
	} else {
		last_block = inode->i_blocks >>
				(inode->i_sb->s_blocksize_bits - 9);
		if (sector >= last_block)
			return 0;
	}

	return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks,
				      phys);
}